repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
cmfeng/class_project
|
Analysis/data_cleaning_utils.py
|
1
|
2753
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
"""
Importing the data.
This step is key, as we have to ensure that the time format
of the data is recognized
"""
datecol = ""
def import_data(datafile):
raw_data = pd.read_csv(datafile, parse_dates=True,
infer_datetime_format=True, thousands=",", encoding='cp1252')
print(raw_data.columns)
global datecol # Needed to make datetime column global
datecol = input('What is your datetime column?')
# Need to add code to catch errors and ensure this is in datetime format
raw_data[datecol] = pd.to_datetime(raw_data[datecol])
raw_data.index = raw_data[datecol]
return raw_data
"""
Defining the first function to smooth the data
This will return the dataframe with the smoothed data overwriting the raw
it will also return a plot of the smoothed data overlaid on the raw data
"""
def test_smooth_data(column, raw_input):
#This asks the user for what size window to average over
window = int(input("What size windows do you want for the moving average? "))
columns = []
data = raw_input.copy()
for x in data.columns[1:]:
columns.append(x)
for x in columns:
if (type(data[x][0]) != str):
try:
                # pd.rolling_mean() was removed in later pandas; the rolling() accessor is the equivalent
                moving = data[x].rolling(window).mean()
data[x] = moving
except ValueError:
print("Value Error")
fig, ax = plt.subplots(2, figsize=(14, 6), sharex=True)
raw_input[column].plot(ax=ax[0], title="RAW")
data[column].plot(ax=ax[1], title=str(window) + " s moving average")
return data
"""
Defining the function to reduce the size of the data
"""
def reducer(data):
freq = input("What frequency would you like to resample to? Format = XS(seconds), XT(minutes)")
#This will resample the data. If downsampling, it will take the mean of the
#points. If upsampling, it will fill backwards
columns = []
for x in data.columns[:]:
columns.append(x)
for x in columns:
if (type(data[x][0]) is str):
try:
for i in range(len(data[x])):
print(data[x][i])
data[x][i] = float(data[x][i].replace(',',''))
except ValueError:
print('Value Error')
    # Note: a bare .resample(freq) no longer aggregates in recent pandas; taking the
    # mean (downsampling) and back-filling (upsampling) matches the comment above.
    resampled = data.resample(freq).mean().bfill()
print('Num Samples Before: ' + str(data.size))
print('Num Samples After: ' + str(resampled.size))
return resampled
"""
Remove null values
"""
def nullRemover(datainput):
cleanedoutput = datainput.dropna()
print(datainput.shape)
print(cleanedoutput.shape)
return cleanedoutput
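# A minimal usage sketch (the CSV name and column choice are hypothetical, and
# import_data()/test_smooth_data()/reducer() prompt for input() at runtime):
#
#     raw = import_data("sensor_log.csv")               # prompts for the datetime column
#     smoothed = test_smooth_data(raw.columns[1], raw)  # prompts for the window size
#     resampled = reducer(smoothed)                     # prompts for the resample frequency
#     clean = nullRemover(resampled)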
|
mit
|
automl/pysmac
|
examples/sklearn_example.py
|
1
|
3555
|
from __future__ import print_function, division
import pysmac
import sklearn.ensemble
import sklearn.datasets
import sklearn.cross_validation
# First, let us generate some random classification problem. Note that, due to the
# way pysmac implements parallelism, the data is either a global variable, or
# the function loads it itself. Please refer to the Python manual about the
# multiprocessing module for limitations. In the future, we might include additional
# parameters to the function, but for now that is not possible.
X,Y = sklearn.datasets.make_classification(1000, 20, random_state=2) # seed yields a mediocre initial accuracy on my machine
X_train, X_test, Y_train, Y_test = sklearn.cross_validation.train_test_split(X,Y, test_size=0.33, random_state=1)
# The function to be minimized for this example is the mean accuracy of a random
# forest on the test data set. Note: because SMAC minimizes the objective, we return
# the negative accuracy in order to maximize it.
def random_forest(n_estimators,criterion, max_features, max_depth):
predictor = sklearn.ensemble.RandomForestClassifier(n_estimators=n_estimators, criterion=criterion, max_features=max_features, max_depth=max_depth)
predictor.fit(X_train, Y_train)
return -predictor.score(X_test, Y_test)
parameter_definition=dict(\
max_depth =("integer", [1,10], 4),
max_features=("integer", [1,20], 10),
n_estimators=("integer", [10,100], 10, 'log'),
criterion =("categorical", ['gini', 'entropy'], 'entropy'),
)
# a little bit of explanation: the first two lines define integer parameters
# ranging from 1 to 10/20 with some default values. The third line also defines
# an integer parameter, but the additional 'log' string tells SMAC to vary it
# uniformly on a logarithmic scale. Here it means that 1<=n_estimators<=10 is
# as likely as 10<n_estimators<=100.
# The last line defines a categorical parameter. For now, the values are always
# treated as strings. This means you would have to cast them inside your function
# when that is not appropriate, e.g., when discretizing an interval.
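# For example, a hypothetical numeric categorical declared as
# ("categorical", [10, 100], 10) would reach the wrapped function as the string
# "10" or "100", so the function would have to cast it itself, e.g.
# n_estimators = int(n_estimators). The 'criterion' parameter above really is a
# string, so no cast is needed there.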
# Now we create the optimizer object again. This time with some parameters
opt = pysmac.SMAC_optimizer( working_directory = '/tmp/pysmac_test/',# the folder where SMAC generates output
persistent_files=False, # whether the output will persist beyond the python object's lifetime
debug = False # if something goes wrong, enable this for diagnostic output
)
# first we try the sklearn default, so we can see if SMAC can improve the performance
predictor = sklearn.ensemble.RandomForestClassifier()
predictor.fit(X_train, Y_train)
print(('The default accuracy is %f'%predictor.score(X_test, Y_test)))
# The minimize method also has optional arguments
value, parameters = opt.minimize(random_forest,
100 , parameter_definition, # in a real setting, you probably want to do more than 100 evaluations here
num_runs = 2, # number of independent SMAC runs
seed = 2, # the random seed used. can be an int or a list of ints of length num_runs
num_procs = 2, # pysmac can harness multicore architecture. Specify the number of processes to use here.
        mem_limit_function_mb=1000, # There are built-in mechanisms to limit the resources available to each function call:
t_limit_function_s = 20 # You can limit the memory available and the wallclock time for each function call
)
print(('The highest accuracy found: %f'%(-value)))
print(('Parameter setting %s'%parameters))
|
agpl-3.0
|
spallavolu/scikit-learn
|
examples/ensemble/plot_gradient_boosting_oob.py
|
230
|
4762
|
"""
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``), the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.cross_validation import KFold
from sklearn.cross_validation import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
def cv_estimate(n_folds=3):
cv = KFold(n=X_train.shape[0], n_folds=n_folds)
cv_clf = ensemble.GradientBoostingClassifier(**params)
val_scores = np.zeros((n_estimators,), dtype=np.float64)
for train, test in cv:
cv_clf.fit(X_train[train], y_train[train])
val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
val_scores /= n_folds
return val_scores
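# Note: cv_estimate averages the staged test deviance over the k held-out folds,
# giving a cross-validation curve directly comparable to the OOB and held-out
# test curves computed below.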
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)
# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
|
bsd-3-clause
|
rhyolight/nupic.research
|
projects/sequence_prediction/discrete_sequences/plotMultiplePredictionWithErrBar.py
|
12
|
6799
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Plot multiple prediction experiment results with error bars
"""
import os
import pickle
from matplotlib import pyplot as plt
import matplotlib as mpl
import numpy as np
from plot import movingAverage
from plot import computeAccuracy
from plot import readExperiment
mpl.rcParams['pdf.fonttype'] = 42
plt.ion()
plt.close('all')
def loadExperiment(experiment):
print "Loading experiment ", experiment
data = readExperiment(experiment)
(accuracy, x) = computeAccuracy(data['predictions'],
data['truths'],
data['iterations'],
resets=data['resets'],
randoms=data['randoms'])
accuracy = movingAverage(accuracy, min(len(accuracy), 100))
return (accuracy, x)
def calculateMeanStd(accuracyAll):
numRepeats = len(accuracyAll)
numLength = min([len(a) for a in accuracyAll])
accuracyMat = np.zeros(shape=(numRepeats, numLength))
for i in range(numRepeats):
accuracyMat[i, :] = accuracyAll[i][:numLength]
meanAccuracy = np.mean(accuracyMat, axis=0)
stdAccuracy = np.std(accuracyMat, axis=0)
return (meanAccuracy, stdAccuracy)
def plotWithErrBar(x, y, error, color):
plt.fill_between(x, y-error, y+error,
alpha=0.3, edgecolor=color, facecolor=color)
plt.plot(x, y, color, color=color, linewidth=4)
plt.ylabel('Prediction Accuracy')
plt.xlabel(' Number of elements seen')
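# Note: the shaded band drawn by plotWithErrBar spans y +/- error, i.e. one
# standard deviation around the mean accuracy computed by calculateMeanStd.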
if __name__ == '__main__':
try:
# Load raw experiment results
# You have to run the experiments
# In ./tm/
# python tm_suite.py --experiment="high-order-distributed-random-perturbed" -d
# In ./lstm/
# python suite.py --experiment="high-order-distributed-random-perturbed" -d
expResults = {}
tmResults = os.path.join("tm/results",
"high-order-distributed-random-multiple-predictions")
lstmResults = os.path.join("lstm/results",
"high-order-distributed-random-multiple-predictions")
elmResults = os.path.join("elm/results",
"high-order-distributed-random-multiple-predictions")
for numPrediction in [2, 4]:
accuracyTM = []
accuracyLSTM = []
accuracyELM = []
for seed in range(10):
experiment = os.path.join(tmResults,
"num_predictions{:.1f}seed{:.1f}".format(numPrediction, seed),
"0.log")
(accuracy, x) = loadExperiment(experiment)
accuracyTM.append(np.array(accuracy))
experiment = os.path.join(lstmResults,
"seed{:.1f}num_predictions{:.1f}".format(seed, numPrediction),
"0.log")
(accuracy, x) = loadExperiment(experiment)
accuracyLSTM.append(np.array(accuracy))
experiment = os.path.join(elmResults,
"seed{:.1f}num_predictions{:.1f}".format(seed, numPrediction),
"0.log")
(accuracy, x) = loadExperiment(experiment)
accuracyELM.append(np.array(accuracy))
(meanAccuracy, stdAccuracy) = calculateMeanStd(accuracyTM)
expResult = {'x': x[:len(meanAccuracy)], 'meanAccuracy': meanAccuracy, 'stdAccuracy': stdAccuracy}
expResults['HTMNumPrediction{:.0f}'.format(numPrediction)] = expResult
(meanAccuracy, stdAccuracy) = calculateMeanStd(accuracyLSTM)
expResult = {'x': x[:len(meanAccuracy)], 'meanAccuracy': meanAccuracy, 'stdAccuracy': stdAccuracy}
expResults['LSTMNumPrediction{:.0f}'.format(numPrediction)] = expResult
(meanAccuracy, stdAccuracy) = calculateMeanStd(accuracyELM)
expResult = {'x': x[:len(meanAccuracy)], 'meanAccuracy': meanAccuracy, 'stdAccuracy': stdAccuracy}
expResults['ELMNumPrediction{:.0f}'.format(numPrediction)] = expResult
output = open('./result/MultiPredictionExperiment.pkl', 'wb')
pickle.dump(expResults, output, -1)
output.close()
except:
print "Cannot find raw experiment results"
print "Plot using saved processed experiment results"
input = open('./result/MultiPredictionExperiment.pkl', 'rb')
expResults = pickle.load(input)
colorList = {"HTMNumPrediction2": "r",
"LSTMNumPrediction2": "g",
"ELMNumPrediction2": "b",
"HTMNumPrediction4": "r",
"LSTMNumPrediction4": "g",
"ELMNumPrediction4": "b"}
modelList = ['HTMNumPrediction2',
'LSTMNumPrediction2',
'ELMNumPrediction2',
'HTMNumPrediction4',
'LSTMNumPrediction4',
'ELMNumPrediction4']
plt.figure(1)
for model in ['HTMNumPrediction2',
'LSTMNumPrediction2',
'ELMNumPrediction2']:
expResult = expResults[model]
plotWithErrBar(expResult['x'],
expResult['meanAccuracy'], expResult['stdAccuracy'],
colorList[model])
plt.legend(['HTM', 'LSTM', 'ELM'], loc=4)
plt.figure(2)
for model in ['HTMNumPrediction4',
'LSTMNumPrediction4',
'ELMNumPrediction4']:
expResult = expResults[model]
plotWithErrBar(expResult['x'],
expResult['meanAccuracy'], expResult['stdAccuracy'],
colorList[model])
plt.legend(['HTM', 'LSTM', 'ELM'], loc=4)
for fig in [1, 2]:
plt.figure(fig)
retrainLSTMAt = np.arange(start=1000, stop=12000, step=1000)
for line in retrainLSTMAt:
plt.axvline(line, color='orange')
plt.ylim([-0.05, 1.05])
# plt.xlim([0, 11000])
plt.figure(1)
plt.savefig('./result/model_performance_2_prediction_errbar.pdf')
plt.figure(2)
plt.savefig('./result/model_performance_4_prediction_errbar.pdf')
|
gpl-3.0
|
jcharit1/Identifying-Ad-Images
|
code/predict_image_type.py
|
1
|
5661
|
#!/home/jim/anaconda2/envs/py35/bin/python
"""
Code for making predictions on new data
_Author: Jimmy Charité_
_Email: [email protected]_
_Date: January 9, 2017_
"""
# Packages
###############################################################################
import sys
import numpy as np
import pandas as pd
from sklearn.feature_selection import VarianceThreshold
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
# Arguments
###############################################################################
path_old_data = sys.argv[1] #full path of the old (training) data
path_colnames = sys.argv[2] #full path of colname file provided within the repo
path_new_data = sys.argv[3] #full path of the new data
path_pred_file = sys.argv[4] #full path of the prediction output data
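# Example invocation (the file names below are hypothetical placeholders):
#   python predict_image_type.py training.csv column_names.txt new_data.csv predictions.csv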
# Data Prep Function
###############################################################################
def clean_data(data_path, col_name_path):
#Upload the data
raw_data=pd.read_csv(data_path,header=None)
#Upload and edit the column names
col_names=pd.read_csv(col_name_path,header=None,
sep=":")
col_names.columns=['variable','type']
col_names=pd.concat((col_names,
pd.DataFrame({'variable':['image_type'],
'type':['0,1.'] })),axis=0)
col_names=col_names[['variable','type']]
    #Add column names to the raw data
raw_data.columns=list(col_names.variable)
#Make the data numerical
raw_data.replace({'image_type': {'nonad.':0,'ad.':1}},inplace=True)
raw_data=raw_data.apply(lambda row: pd.to_numeric(row,errors='coerce'))
raw_data.ix[raw_data.local.isnull(), 'local']=0
#Make the Continuous variables Categorical
raw_data['aratio_cat']='aratio_NaN'
raw_data.ix[(raw_data.aratio>=0) & (raw_data.aratio<2),
'aratio_cat']='aratio_0t2'
raw_data.ix[(raw_data.aratio>=2) & (raw_data.aratio<4),
'aratio_cat']='aratio_2t4'
raw_data.ix[(raw_data.aratio>=4) & (raw_data.aratio<6),
'aratio_cat']='aratio_4t6'
raw_data.ix[(raw_data.aratio>=6) & (raw_data.aratio<8),
'aratio_cat']='aratio_6t8'
raw_data.ix[(raw_data.aratio>=8) & (raw_data.aratio<10),
'aratio_cat']='aratio_8t10'
raw_data.ix[(raw_data.aratio>=10), 'aratio_cat']='aratio_10t'
aspect_cats=pd.get_dummies(raw_data['aratio_cat'])
del aspect_cats['aratio_NaN'] #comparison category
del raw_data['aratio_cat']
raw_data['height_cat']='height_NaN'
raw_data.ix[(raw_data.height>=0) & (raw_data.height<50),
'height_cat']='height_0t50'
raw_data.ix[(raw_data.height>=50) & (raw_data.height<100),
'height_cat']='height_50t100'
raw_data.ix[(raw_data.height>=100) & (raw_data.height<150),
'height_cat']='height_100t150'
raw_data.ix[(raw_data.height>=150) & (raw_data.height<200),
'height_cat']='height_150t200'
raw_data.ix[(raw_data.height>=200) & (raw_data.height<250),
'height_cat']='height_200t250'
raw_data.ix[(raw_data.height>=250) & (raw_data.height<300),
'height_cat']='height_250t300'
raw_data.ix[(raw_data.height>=300) & (raw_data.height<350),
'height_cat']='height_300t350'
raw_data.ix[(raw_data.height>=350) & (raw_data.height<400),
'height_cat']='height_350t400'
raw_data.ix[(raw_data.height>=400), 'height_cat']='height_400t'
height_cats=pd.get_dummies(raw_data['height_cat'])
del height_cats['height_NaN'] #comparison category
del raw_data['height_cat']
raw_data['width_cat']='width_NaN'
raw_data.ix[(raw_data.width>=0) & (raw_data.width<50),
'width_cat']='width_0t50'
raw_data.ix[(raw_data.width>=50) & (raw_data.width<100),
'width_cat']='width_50t100'
raw_data.ix[(raw_data.width>=100) & (raw_data.width<150),
'width_cat']='width_100t150'
raw_data.ix[(raw_data.width>=150) & (raw_data.width<200),
'width_cat']='width_150t200'
raw_data.ix[(raw_data.width>=200) & (raw_data.width<250),
'width_cat']='width_200t250'
raw_data.ix[(raw_data.width>=250) & (raw_data.width<300),
'width_cat']='width_250t300'
raw_data.ix[(raw_data.width>=300) & (raw_data.width<350),
'width_cat']='width_300t350'
raw_data.ix[(raw_data.width>=350) & (raw_data.width<400),
'width_cat']='width_350t400'
raw_data.ix[(raw_data.width>=400), 'width_cat']='width_400t'
width_cats=pd.get_dummies(raw_data['width_cat'])
del width_cats['width_NaN'] #comparison category
del raw_data['width_cat']
del raw_data['height'], raw_data['width'], raw_data['aratio']
raw_data=pd.concat([height_cats,width_cats,aspect_cats,raw_data], axis=1)
X = (raw_data.iloc[:,:-1]).as_matrix()
y = (raw_data.iloc[:,-1]).tolist()
return X, y
# Creating Sklearn Friendly Datasets
###############################################################################
X_train, y_train = clean_data(path_old_data, path_colnames)
X_test, y_test = clean_data(path_new_data, path_colnames)
# Model fitting and predictions
###############################################################################
vt=VarianceThreshold(threshold=0)
log_clf=LogisticRegression(C=10,class_weight={1: 1})
log_clf_est = Pipeline(steps=[('vt',vt),('clf',log_clf)])
log_clf_est.fit(X_train,y_train)
y_pred = log_clf_est.predict(X_test)
np.savetxt(path_pred_file, y_pred, delimiter=',')
|
mit
|
stonebig/bokeh
|
examples/models/file/latex_extension.py
|
1
|
3121
|
""" The LaTex example was derived from: http://matplotlib.org/users/usetex.html
"""
from bokeh.models import Label
from bokeh.palettes import Spectral4
from bokeh.plotting import output_file, figure, show
from bokeh.util.compiler import TypeScript
import numpy as np
from scipy.special import jv
output_file('latex_extension.html')
class LatexLabel(Label):
    """A subclass of `Label` with all of the same class attributes, except that
    canvas mode isn't supported and DOM manipulation happens in the TypeScript
    superclass implementation (which requires setting `render_mode='css'`).
    Only the render method of LabelView is overwritten to perform the
    text -> latex (via katex) conversion.
    """
__javascript__ = ["https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.10.0/katex.min.js"]
__css__ = ["https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.10.0/katex.min.css"]
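    # These class attributes tell Bokeh to load the KaTeX script and stylesheet from
    # the CDN into the page, alongside the compiled TypeScript implementation below.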
__implementation__ = TypeScript("""
import {Label, LabelView} from "models/annotations/label"
declare namespace katex {
function render(expression: string, element: HTMLElement, options: {displayMode?: boolean}): void
}
export class LatexLabelView extends LabelView {
model: LatexLabel
render(): void {
    // Here because AngleSpec does units transform and label doesn't support specs
let angle: number
switch (this.model.angle_units) {
case "rad": {
angle = -1 * this.model.angle
break
}
case "deg": {
angle = -1 * this.model.angle * Math.PI/180.0
break
}
default:
throw new Error("unreachable")
}
const panel = this.panel || this.plot_view.frame
const xscale = this.plot_view.frame.xscales[this.model.x_range_name]
const yscale = this.plot_view.frame.yscales[this.model.y_range_name]
const {x, y} = this.model
let sx = this.model.x_units == "data" ? xscale.compute(x) : panel.xview.compute(x)
let sy = this.model.y_units == "data" ? yscale.compute(y) : panel.yview.compute(y)
sx += this.model.x_offset
sy -= this.model.y_offset
this._css_text(this.plot_view.canvas_view.ctx, "", sx, sy, angle)
katex.render(this.model.text, this.el, {displayMode: true})
}
}
export class LatexLabel extends Label {
static initClass(): void {
this.prototype.type = "LatexLabel"
this.prototype.default_view = LatexLabelView
}
}
LatexLabel.initClass()
""")
p = figure(title="LaTex Extension Demonstration", plot_width=800, plot_height=350,
background_fill_color="#fafafa")
p.x_range.range_padding = 0
x = np.arange(0.0, 20.0, 0.02)
for i, n in enumerate([0, 1, 4, 7]):
p.line(x, jv(n, x), line_width=3, color=Spectral4[i], alpha=0.8, legend="𝜈=%d" % n)
text = (r"\text{Bessel Functions of the First Kind: }" +
r"J_\nu = \sum_{m=0}^{\infty}\frac{(-1)^m}{m!\ \Gamma(m+\nu+1)}" +
r"\left(\frac{x}{2}\right)^{2m+\nu}")
latex = LatexLabel(text=text,x=4.5, y=250, x_units='data', y_units='screen',
render_mode='css', text_font_size='8pt',
background_fill_color="white", border_line_color="lightgrey")
p.add_layout(latex)
show(p)
|
bsd-3-clause
|
perimosocordiae/scipy
|
scipy/special/_precompute/wright_bessel.py
|
12
|
12928
|
"""Precompute coefficients of several series expansions
of Wright's generalized Bessel function Phi(a, b, x).
See https://dlmf.nist.gov/10.46.E1 with rho=a, beta=b, z=x.
"""
from argparse import ArgumentParser, RawTextHelpFormatter
import numpy as np
from scipy.integrate import quad
from scipy.optimize import minimize_scalar, curve_fit
from time import time
try:
import sympy # type: ignore[import]
from sympy import EulerGamma, Rational, S, Sum, \
factorial, gamma, gammasimp, pi, polygamma, symbols, zeta
from sympy.polys.polyfuncs import horner # type: ignore[import]
except ImportError:
pass
def series_small_a():
    """Taylor series expansion of Phi(a, b, x) in a=0 up to order 5.
"""
order = 5
a, b, x, k = symbols("a b x k")
A = [] # terms with a
X = [] # terms with x
B = [] # terms with b (polygammas)
# Phi(a, b, x) = exp(x)/gamma(b) * sum(A[i] * X[i] * B[i])
expression = Sum(x**k/factorial(k)/gamma(a*k+b), (k, 0, S.Infinity))
expression = gamma(b)/sympy.exp(x) * expression
# nth term of taylor series in a=0: a^n/n! * (d^n Phi(a, b, x)/da^n at a=0)
for n in range(0, order+1):
term = expression.diff(a, n).subs(a, 0).simplify().doit()
# set the whole bracket involving polygammas to 1
x_part = (term.subs(polygamma(0, b), 1)
.replace(polygamma, lambda *args: 0))
        # sign convention: x part always positive
x_part *= (-1)**n
A.append(a**n/factorial(n))
X.append(horner(x_part))
B.append(horner((term/x_part).simplify()))
    s = "Taylor series expansion of Phi(a, b, x) in a=0 up to order 5.\n"
s += "Phi(a, b, x) = exp(x)/gamma(b) * sum(A[i] * X[i] * B[i], i=0..5)\n"
for name, c in zip(['A', 'X', 'B'], [A, X, B]):
for i in range(len(c)):
s += f"\n{name}[{i}] = " + str(c[i])
return s
# expansion of digamma
def dg_series(z, n):
"""Symbolic expansion of digamma(z) in z=0 to order n.
See https://dlmf.nist.gov/5.7.E4 and with https://dlmf.nist.gov/5.5.E2
"""
k = symbols("k")
return -1/z - EulerGamma + \
sympy.summation((-1)**k * zeta(k) * z**(k-1), (k, 2, n+1))
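# i.e. dg_series returns the truncated expansion
#   digamma(z) = -1/z - EulerGamma + sum_{k=2..n+1} (-1)^k * zeta(k) * z^(k-1)
# (DLMF 5.7.4 combined with the recurrence in DLMF 5.5.2, as cited above).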
def pg_series(k, z, n):
"""Symbolic expansion of polygamma(k, z) in z=0 to order n."""
return sympy.diff(dg_series(z, n+k), z, k)
def series_small_a_small_b():
    """Taylor series expansion of Phi(a, b, x) in a=0 and b=0 up to order 5.
Be aware of cancellation of poles in b=0 of digamma(b)/Gamma(b) and
polygamma functions.
digamma(b)/Gamma(b) = -1 - 2*M_EG*b + O(b^2)
digamma(b)^2/Gamma(b) = 1/b + 3*M_EG + b*(-5/12*PI^2+7/2*M_EG^2) + O(b^2)
polygamma(1, b)/Gamma(b) = 1/b + M_EG + b*(1/12*PI^2 + 1/2*M_EG^2) + O(b^2)
and so on.
"""
order = 5
a, b, x, k = symbols("a b x k")
M_PI, M_EG, M_Z3 = symbols("M_PI M_EG M_Z3")
c_subs = {pi: M_PI, EulerGamma: M_EG, zeta(3): M_Z3}
A = [] # terms with a
X = [] # terms with x
B = [] # terms with b (polygammas expanded)
C = [] # terms that generate B
# Phi(a, b, x) = exp(x) * sum(A[i] * X[i] * B[i])
# B[0] = 1
# B[k] = sum(C[k] * b**k/k!, k=0..)
# Note: C[k] can be obtained from a series expansion of 1/gamma(b).
expression = gamma(b)/sympy.exp(x) * \
Sum(x**k/factorial(k)/gamma(a*k+b), (k, 0, S.Infinity))
# nth term of taylor series in a=0: a^n/n! * (d^n Phi(a, b, x)/da^n at a=0)
for n in range(0, order+1):
term = expression.diff(a, n).subs(a, 0).simplify().doit()
# set the whole bracket involving polygammas to 1
x_part = (term.subs(polygamma(0, b), 1)
.replace(polygamma, lambda *args: 0))
        # sign convention: x part always positive
x_part *= (-1)**n
# expansion of polygamma part with 1/gamma(b)
pg_part = term/x_part/gamma(b)
if n >= 1:
# Note: highest term is digamma^n
pg_part = pg_part.replace(polygamma,
lambda k, x: pg_series(k, x, order+1+n))
pg_part = (pg_part.series(b, 0, n=order+1-n)
.removeO()
.subs(polygamma(2, 1), -2*zeta(3))
.simplify()
)
A.append(a**n/factorial(n))
X.append(horner(x_part))
B.append(pg_part)
# Calculate C and put in the k!
C = sympy.Poly(B[1].subs(c_subs), b).coeffs()
C.reverse()
for i in range(len(C)):
C[i] = (C[i] * factorial(i)).simplify()
    s = "Taylor series expansion of Phi(a, b, x) in a=0 and b=0 up to order 5."
s += "\nPhi(a, b, x) = exp(x) * sum(A[i] * X[i] * B[i], i=0..5)\n"
s += "B[0] = 1\n"
s += "B[i] = sum(C[k+i-1] * b**k/k!, k=0..)\n"
s += "\nM_PI = pi"
s += "\nM_EG = EulerGamma"
s += "\nM_Z3 = zeta(3)"
for name, c in zip(['A', 'X'], [A, X]):
for i in range(len(c)):
s += f"\n{name}[{i}] = "
s += str(c[i])
# For C, do also compute the values numerically
for i in range(len(C)):
s += f"\n# C[{i}] = "
s += str(C[i])
s += f"\nC[{i}] = "
s += str(C[i].subs({M_EG: EulerGamma, M_PI: pi, M_Z3: zeta(3)})
.evalf(17))
# Does B have the assumed structure?
s += "\n\nTest if B[i] does have the assumed structure."
    s += "\nC[i] are derived from B[1] alone."
s += "\nTest B[2] == C[1] + b*C[2] + b^2/2*C[3] + b^3/6*C[4] + .."
test = sum([b**k/factorial(k) * C[k+1] for k in range(order-1)])
test = (test - B[2].subs(c_subs)).simplify()
s += f"\ntest successful = {test==S(0)}"
s += "\nTest B[3] == C[2] + b*C[3] + b^2/2*C[4] + .."
test = sum([b**k/factorial(k) * C[k+2] for k in range(order-2)])
test = (test - B[3].subs(c_subs)).simplify()
s += f"\ntest successful = {test==S(0)}"
return s
def asymptotic_series():
"""Asymptotic expansion for large x.
Phi(a, b, x) ~ Z^(1/2-b) * exp((1+a)/a * Z) * sum_k (-1)^k * C_k / Z^k
Z = (a*x)^(1/(1+a))
Wright (1935) lists the coefficients C_0 and C_1 (he calls them a_0 and
a_1). With slightly different notation, Paris (2017) lists coefficients
c_k up to order k=3.
Paris (2017) uses ZP = (1+a)/a * Z (ZP = Z of Paris) and
C_k = C_0 * (-a/(1+a))^k * c_k
"""
order = 8
class g(sympy.Function):
"""Helper function g according to Wright (1935)
g(n, rho, v) = (1 + (rho+2)/3 * v + (rho+2)*(rho+3)/(2*3) * v^2 + ...)
Note: Wright (1935) uses square root of above definition.
"""
nargs = 3
@classmethod
def eval(cls, n, rho, v):
if not n >= 0:
raise ValueError("must have n >= 0")
elif n == 0:
return 1
else:
return g(n-1, rho, v) \
+ gammasimp(gamma(rho+2+n)/gamma(rho+2)) \
/ gammasimp(gamma(3+n)/gamma(3))*v**n
class coef_C(sympy.Function):
"""Calculate coefficients C_m for integer m.
C_m is the coefficient of v^(2*m) in the Taylor expansion in v=0 of
Gamma(m+1/2)/(2*pi) * (2/(rho+1))^(m+1/2) * (1-v)^(-b)
* g(rho, v)^(-m-1/2)
"""
nargs = 3
@classmethod
def eval(cls, m, rho, beta):
if not m >= 0:
raise ValueError("must have m >= 0")
v = symbols("v")
expression = (1-v)**(-beta) * g(2*m, rho, v)**(-m-Rational(1, 2))
res = expression.diff(v, 2*m).subs(v, 0) / factorial(2*m)
res = res * (gamma(m + Rational(1, 2)) / (2*pi)
* (2/(rho+1))**(m + Rational(1, 2)))
return res
# in order to have nice ordering/sorting of expressions, we set a = xa.
xa, b, xap1 = symbols("xa b xap1")
C0 = coef_C(0, xa, b)
# a1 = a(1, rho, beta)
s = "Asymptotic expansion for large x\n"
s += "Phi(a, b, x) = Z**(1/2-b) * exp((1+a)/a * Z) \n"
s += " * sum((-1)**k * C[k]/Z**k, k=0..6)\n\n"
s += "Z = pow(a * x, 1/(1+a))\n"
s += "A[k] = pow(a, k)\n"
s += "B[k] = pow(b, k)\n"
s += "Ap1[k] = pow(1+a, k)\n\n"
s += "C[0] = 1./sqrt(2. * M_PI * Ap1[1])\n"
for i in range(1, order+1):
expr = (coef_C(i, xa, b) / (C0/(1+xa)**i)).simplify()
factor = [x.denominator() for x in sympy.Poly(expr).coeffs()]
factor = sympy.lcm(factor)
expr = (expr * factor).simplify().collect(b, sympy.factor)
expr = expr.xreplace({xa+1: xap1})
s += f"C[{i}] = C[0] / ({factor} * Ap1[{i}])\n"
s += f"C[{i}] *= {str(expr)}\n\n"
import re
re_a = re.compile(r'xa\*\*(\d+)')
s = re_a.sub(r'A[\1]', s)
re_b = re.compile(r'b\*\*(\d+)')
s = re_b.sub(r'B[\1]', s)
s = s.replace('xap1', 'Ap1[1]')
s = s.replace('xa', 'a')
# max integer = 2^31-1 = 2,147,483,647. Solution: Put a point after 10
# or more digits.
re_digits = re.compile(r'(\d{10,})')
s = re_digits.sub(r'\1.', s)
return s
def optimal_epsilon_integral():
"""Fit optimal choice of epsilon for integral representation.
The integrand of
int_0^pi P(eps, a, b, x, phi) * dphi
can exhibit oscillatory behaviour. It stems from the cosine of P and can be
minimized by minimizing the arc length of the argument
f(phi) = eps * sin(phi) - x * eps^(-a) * sin(a * phi) + (1 - b) * phi
of cos(f(phi)).
We minimize the arc length in eps for a grid of values (a, b, x) and fit a
parametric function to it.
"""
def fp(eps, a, b, x, phi):
"""Derivative of f w.r.t. phi."""
eps_a = np.power(1. * eps, -a)
return eps * np.cos(phi) - a * x * eps_a * np.cos(a * phi) + 1 - b
def arclength(eps, a, b, x, epsrel=1e-2, limit=100):
"""Compute Arc length of f.
        Note that the arc length of a function f from t0 to t1 is given by
        int_t0^t1 sqrt(1 + f'(t)^2) dt
"""
return quad(lambda phi: np.sqrt(1 + fp(eps, a, b, x, phi)**2),
0, np.pi,
                    epsrel=epsrel, limit=limit)[0]
# grid of minimal arc length values
data_a = [1e-3, 0.1, 0.5, 0.9, 1, 2, 4, 5, 6, 8]
data_b = [0, 1, 4, 7, 10]
data_x = [1, 1.5, 2, 4, 10, 20, 50, 100, 200, 500, 1e3, 5e3, 1e4]
data_a, data_b, data_x = np.meshgrid(data_a, data_b, data_x)
data_a, data_b, data_x = (data_a.flatten(), data_b.flatten(),
data_x.flatten())
best_eps = []
for i in range(data_x.size):
best_eps.append(
minimize_scalar(lambda eps: arclength(eps, data_a[i], data_b[i],
data_x[i]),
bounds=(1e-3, 1000),
method='Bounded', options={'xatol': 1e-3}).x
)
best_eps = np.array(best_eps)
# pandas would be nice, but here a dictionary is enough
df = {'a': data_a,
'b': data_b,
'x': data_x,
'eps': best_eps,
}
def func(data, A0, A1, A2, A3, A4, A5):
"""Compute parametric function to fit."""
a = data['a']
b = data['b']
x = data['x']
return (A0 * b * np.exp(-0.5 * a)
+ np.exp(A1 + 1 / (1 + a) * np.log(x) - A2 * np.exp(-A3 * a)
+ A4 / (1 + np.exp(A5 * a))))
func_params = list(curve_fit(func, df, df['eps'], method='trf')[0])
s = "Fit optimal eps for integrand P via minimal arc length\n"
s += "with parametric function:\n"
s += "optimal_eps = (A0 * b * exp(-a/2) + exp(A1 + 1 / (1 + a) * log(x)\n"
s += " - A2 * exp(-A3 * a) + A4 / (1 + exp(A5 * a)))\n\n"
s += "Fitted parameters A0 to A5 are:\n"
s += ', '.join(['{:.5g}'.format(x) for x in func_params])
return s
def main():
t0 = time()
parser = ArgumentParser(description=__doc__,
formatter_class=RawTextHelpFormatter)
parser.add_argument('action', type=int, choices=[1, 2, 3, 4],
                        help='choose which expansion to precompute\n'
'1 : Series for small a\n'
'2 : Series for small a and small b\n'
'3 : Asymptotic series for large x\n'
' This may take some time (>4h).\n'
'4 : Fit optimal eps for integral representation.'
)
args = parser.parse_args()
switch = {1: lambda: print(series_small_a()),
2: lambda: print(series_small_a_small_b()),
3: lambda: print(asymptotic_series()),
4: lambda: print(optimal_epsilon_integral())
}
switch.get(args.action, lambda: print("Invalid input."))()
print("\n{:.1f} minutes elapsed.\n".format((time() - t0)/60))
if __name__ == '__main__':
main()
|
bsd-3-clause
|
gengliangwang/spark
|
python/pyspark/pandas/tests/test_sql.py
|
15
|
1979
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark import pandas as ps
from pyspark.sql.utils import ParseException
from pyspark.testing.pandasutils import PandasOnSparkTestCase
from pyspark.testing.sqlutils import SQLTestUtils
class SQLTest(PandasOnSparkTestCase, SQLTestUtils):
def test_error_variable_not_exist(self):
msg = "The key variable_foo in the SQL statement was not found.*"
with self.assertRaisesRegex(ValueError, msg):
ps.sql("select * from {variable_foo}")
def test_error_unsupported_type(self):
msg = "Unsupported variable type dict: {'a': 1}"
with self.assertRaisesRegex(ValueError, msg):
some_dict = {"a": 1}
ps.sql("select * from {some_dict}")
def test_error_bad_sql(self):
with self.assertRaises(ParseException):
ps.sql("this is not valid sql")
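    # (Background: ps.sql() substitutes Python objects referenced as {name} inside the
    # statement; the tests above cover a missing name, an unsupported value type, and
    # a statement that fails to parse.)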
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_sql import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
apache-2.0
|
lancezlin/ml_template_py
|
lib/python2.7/site-packages/sklearn/utils/setup.py
|
77
|
2993
|
import os
from os.path import join
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('utils', parent_package, top_path)
config.add_subpackage('sparsetools')
cblas_libs, blas_info = get_blas_info()
cblas_compile_args = blas_info.pop('extra_compile_args', [])
cblas_includes = [join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])]
libraries = []
if os.name == 'posix':
libraries.append('m')
cblas_libs.append('m')
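    # On POSIX systems the C math library (libm) has to be linked explicitly, both
    # for the plain Cython extensions and for the CBLAS-based ones.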
config.add_extension('sparsefuncs_fast', sources=['sparsefuncs_fast.pyx'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.pyx'],
depends=[join('src', 'cholesky_delete.h')],
libraries=cblas_libs,
include_dirs=cblas_includes,
extra_compile_args=cblas_compile_args,
**blas_info
)
config.add_extension('murmurhash',
sources=['murmurhash.pyx', join(
'src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('lgamma',
sources=['lgamma.pyx', join('src', 'gamma.c')],
include_dirs=['src'],
libraries=libraries)
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.pyx'],
include_dirs=[numpy.get_include()])
config.add_extension('fast_dict',
sources=['fast_dict.pyx'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('seq_dataset',
sources=['seq_dataset.pyx'],
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=['weight_vector.pyx'],
include_dirs=cblas_includes,
libraries=cblas_libs,
**blas_info)
config.add_extension("_random",
sources=["_random.pyx"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_logistic_sigmoid",
sources=["_logistic_sigmoid.pyx"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
mit
|
pyannote/pyannote-audio
|
pyannote/audio/tasks/segmentation/segmentation.py
|
1
|
17573
|
# MIT License
#
# Copyright (c) 2020-2021 CNRS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
from collections import Counter
from typing import Text, Tuple, Union
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch_audiomentations.core.transforms_interface import BaseWaveformTransform
from typing_extensions import Literal
from pyannote.audio.core.task import Problem, Resolution, Specifications, Task
from pyannote.audio.tasks.segmentation.mixins import SegmentationTaskMixin
from pyannote.audio.utils.loss import binary_cross_entropy, mse_loss
from pyannote.audio.utils.permutation import permutate
from pyannote.core import SlidingWindow
from pyannote.database import Protocol
class Segmentation(SegmentationTaskMixin, Task):
"""Segmentation
Note that data augmentation is used to increase the proportion of "overlap".
This is achieved by generating chunks made out of the (weighted) sum of two
random chunks.
Parameters
----------
protocol : Protocol
pyannote.database protocol
duration : float, optional
Chunks duration. Defaults to 2s.
warm_up : float or (float, float), optional
Use that many seconds on the left- and rightmost parts of each chunk
to warm up the model. While the model does process those left- and right-most
parts, only the remaining central part of each chunk is used for computing the
loss during training, and for aggregating scores during inference.
Defaults to 0. (i.e. no warm-up).
balance: str, optional
When provided, training samples are sampled uniformly with respect to that key.
For instance, setting `balance` to "uri" will make sure that each file will be
equally represented in the training samples.
overlap: dict, optional
Controls how artificial chunks with overlapping speech are generated:
- "probability" key is the probability of artificial overlapping chunks. Setting
"probability" to 0.6 means that, on average, 40% of training chunks are "real"
          chunks, while 60% are artificial chunks made out of the (weighted) sum of two
chunks. Defaults to 0.5.
- "snr_min" and "snr_max" keys control the minimum and maximum signal-to-noise
          ratio between summed chunks, in dB. Defaults to 0.0 and 10.0.
weight: str, optional
        When provided, use this key as frame-wise weight in the loss function.
batch_size : int, optional
Number of training samples per batch. Defaults to 32.
num_workers : int, optional
Number of workers used for generating training samples.
Defaults to multiprocessing.cpu_count() // 2.
pin_memory : bool, optional
If True, data loaders will copy tensors into CUDA pinned
memory before returning them. See pytorch documentation
for more details. Defaults to False.
augmentation : BaseWaveformTransform, optional
torch_audiomentations waveform transform, used by dataloader
during training.
vad_loss : {"bce", "mse"}, optional
Add voice activity detection loss.
"""
ACRONYM = "seg"
OVERLAP_DEFAULTS = {"probability": 0.5, "snr_min": 0.0, "snr_max": 10.0}
def __init__(
self,
protocol: Protocol,
duration: float = 2.0,
warm_up: Union[float, Tuple[float, float]] = 0.0,
overlap: dict = OVERLAP_DEFAULTS,
balance: Text = None,
weight: Text = None,
batch_size: int = 32,
num_workers: int = None,
pin_memory: bool = False,
augmentation: BaseWaveformTransform = None,
loss: Literal["bce", "mse"] = "bce",
vad_loss: Literal["bce", "mse"] = None,
):
super().__init__(
protocol,
duration=duration,
warm_up=warm_up,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=pin_memory,
augmentation=augmentation,
)
self.overlap = overlap
self.balance = balance
self.weight = weight
if loss not in ["bce", "mse"]:
raise ValueError("'loss' must be one of {'bce', 'mse'}.")
self.loss = loss
self.vad_loss = vad_loss
def setup(self, stage=None):
super().setup(stage=stage)
if stage == "fit":
# slide a window (with 1s step) over the whole training set
# and keep track of the number of speakers in each location
num_speakers = []
for file in self._train:
start = file["annotated"][0].start
end = file["annotated"][-1].end
window = SlidingWindow(
start=start,
end=end,
duration=self.duration,
step=1.0,
)
for chunk in window:
num_speakers.append(len(file["annotation"].crop(chunk).labels()))
            # because there might be a few outliers, estimate the upper bound for the
# number of speakers as the 99th percentile
num_speakers, counts = zip(*list(Counter(num_speakers).items()))
num_speakers, counts = np.array(num_speakers), np.array(counts)
sorting_indices = np.argsort(num_speakers)
num_speakers = num_speakers[sorting_indices]
counts = counts[sorting_indices]
self.num_speakers = num_speakers[
np.where(np.cumsum(counts) / np.sum(counts) > 0.99)[0][0]
]
# TODO: add a few more speakers to make sure we don't skip
# too many artificial chunks (which might result in less
            # overlap than we think we have)
# now that we know about the number of speakers upper bound
# we can set task specifications
self.specifications = Specifications(
problem=Problem.MULTI_LABEL_CLASSIFICATION,
resolution=Resolution.FRAME,
duration=self.duration,
warm_up=self.warm_up,
classes=[f"speaker#{i+1}" for i in range(self.num_speakers)],
permutation_invariant=True,
)
def prepare_y(self, one_hot_y: np.ndarray):
"""Zero-pad segmentation targets
Parameters
----------
one_hot_y : (num_frames, num_speakers) np.ndarray
One-hot-encoding of current chunk speaker activity:
* one_hot_y[t, k] = 1 if kth speaker is active at tth frame
* one_hot_y[t, k] = 0 otherwise.
Returns
-------
padded_one_hot_y : (num_frames, self.num_speakers) np.ndarray
One-hot-encoding of current chunk speaker activity:
* one_hot_y[t, k] = 1 if kth speaker is active at tth frame
* one_hot_y[t, k] = 0 otherwise.
"""
num_frames, num_speakers = one_hot_y.shape
if num_speakers > self.num_speakers:
raise ValueError()
if num_speakers < self.num_speakers:
one_hot_y = np.pad(
one_hot_y, ((0, 0), (0, self.num_speakers - num_speakers))
)
return one_hot_y
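    # (Example: with self.num_speakers == 4, prepare_y pads a (num_frames, 2) target on
    # the right with two all-zero speaker columns, giving shape (num_frames, 4).)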
def val__getitem__(self, idx):
f, chunk = self._validation[idx]
sample = self.prepare_chunk(f, chunk, duration=self.duration, stage="val")
y, labels = sample["y"], sample.pop("labels")
# since number of speakers is estimated from the training set,
# we might encounter validation chunks that have more speakers.
# in that case, we arbitrarily remove last speakers
if y.shape[1] > self.num_speakers:
y = y[:, : self.num_speakers]
labels = labels[: self.num_speakers]
sample["y"] = self.prepare_y(y)
return sample
def segmentation_loss(
self,
permutated_prediction: torch.Tensor,
target: torch.Tensor,
weight: torch.Tensor = None,
) -> torch.Tensor:
"""Permutation-invariant segmentation loss
Parameters
----------
permutated_prediction : (batch_size, num_frames, num_classes) torch.Tensor
Permutated speaker activity predictions.
target : (batch_size, num_frames, num_speakers) torch.Tensor
Speaker activity.
weight : (batch_size, num_frames, 1) torch.Tensor, optional
Frames weight.
Returns
-------
seg_loss : torch.Tensor
Permutation-invariant segmentation loss
"""
if self.loss == "bce":
seg_loss = binary_cross_entropy(
permutated_prediction, target.float(), weight=weight
)
elif self.loss == "mse":
seg_loss = mse_loss(permutated_prediction, target.float(), weight=weight)
return seg_loss
def voice_activity_detection_loss(
self,
permutated_prediction: torch.Tensor,
target: torch.Tensor,
weight: torch.Tensor = None,
) -> torch.Tensor:
"""Voice activity detection loss
Parameters
----------
permutated_prediction : (batch_size, num_frames, num_classes) torch.Tensor
Speaker activity predictions.
target : (batch_size, num_frames, num_speakers) torch.Tensor
Speaker activity.
weight : (batch_size, num_frames, 1) torch.Tensor, optional
Frames weight.
Returns
-------
vad_loss : torch.Tensor
Voice activity detection loss.
"""
vad_prediction, _ = torch.max(permutated_prediction, dim=2, keepdim=True)
# (batch_size, num_frames, 1)
vad_target, _ = torch.max(target.float(), dim=2, keepdim=False)
# (batch_size, num_frames)
if self.vad_loss == "bce":
loss = binary_cross_entropy(vad_prediction, vad_target, weight=weight)
elif self.vad_loss == "mse":
loss = mse_loss(vad_prediction, vad_target, weight=weight)
return loss
def training_step(self, batch, batch_idx: int):
"""Compute permutation-invariant binary cross-entropy
Parameters
----------
batch : (usually) dict of torch.Tensor
Current batch.
batch_idx: int
Batch index.
Returns
-------
loss : {str: torch.tensor}
{"loss": loss}
"""
# forward pass
prediction = self.model(batch["X"])
batch_size, num_frames, _ = prediction.shape
# (batch_size, num_frames, num_classes)
# target
target = batch["y"]
permutated_prediction, _ = permutate(target, prediction)
# frames weight
weight_key = getattr(self, "weight", None)
weight = batch.get(
weight_key,
torch.ones(batch_size, num_frames, 1, device=self.model.device),
)
# (batch_size, num_frames, 1)
# warm-up
warm_up_left = round(self.warm_up[0] / self.duration * num_frames)
weight[:, :warm_up_left] = 0.0
warm_up_right = round(self.warm_up[1] / self.duration * num_frames)
weight[:, num_frames - warm_up_right :] = 0.0
seg_loss = self.segmentation_loss(permutated_prediction, target, weight=weight)
self.model.log(
f"{self.ACRONYM}@train_seg_loss",
seg_loss,
on_step=False,
on_epoch=True,
prog_bar=False,
logger=True,
)
if self.vad_loss is None:
vad_loss = 0.0
else:
vad_loss = self.voice_activity_detection_loss(
permutated_prediction, target, weight=weight
)
self.model.log(
f"{self.ACRONYM}@train_vad_loss",
vad_loss,
on_step=False,
on_epoch=True,
prog_bar=False,
logger=True,
)
loss = seg_loss + vad_loss
self.model.log(
f"{self.ACRONYM}@train_loss",
loss,
on_step=False,
on_epoch=True,
prog_bar=True,
logger=True,
)
return {"loss": loss}
def validation_step(self, batch, batch_idx: int):
"""Compute validation F-score
Parameters
----------
batch : dict of torch.Tensor
Current batch.
batch_idx: int
Batch index.
"""
# move metric to model device
self.val_fbeta.to(self.model.device)
X, y = batch["X"], batch["y"]
# X = (batch_size, num_channels, num_samples)
# y = (batch_size, num_frames, num_classes)
y_pred = self.model(X)
_, num_frames, _ = y_pred.shape
# y_pred = (batch_size, num_frames, num_classes)
permutated_y_pred, _ = permutate(y, y_pred)
warm_up_left = round(self.warm_up[0] / self.duration * num_frames)
warm_up_right = round(self.warm_up[1] / self.duration * num_frames)
val_fbeta = self.val_fbeta(
permutated_y_pred[
:, warm_up_left : num_frames - warm_up_right : 10
].squeeze(),
y[:, warm_up_left : num_frames - warm_up_right : 10].squeeze(),
)
self.model.log(
f"{self.ACRONYM}@val_fbeta",
val_fbeta,
on_step=False,
on_epoch=True,
prog_bar=True,
logger=True,
)
# log first batch visualization every 2^n epochs.
if (
self.model.current_epoch == 0
or math.log2(self.model.current_epoch) % 1 > 0
or batch_idx > 0
):
return
# visualize first 9 validation samples of first batch in Tensorboard
X = X.cpu().numpy()
y = y.float().cpu().numpy()
y_pred = y_pred.cpu().numpy()
permutated_y_pred = permutated_y_pred.cpu().numpy()
# prepare 3 x 3 grid (or smaller if batch size is smaller)
num_samples = min(self.batch_size, 9)
nrows = math.ceil(math.sqrt(num_samples))
ncols = math.ceil(num_samples / nrows)
fig, axes = plt.subplots(
nrows=4 * nrows,
ncols=ncols,
figsize=(15, 10),
)
        # reshape target so that there is one line per class when plotting it
y[y == 0] = np.NaN
y *= np.arange(y.shape[2])
# plot each sample
for sample_idx in range(num_samples):
# find where in the grid it should be plotted
row_idx = sample_idx // nrows
col_idx = sample_idx % ncols
# plot waveform
ax_wav = axes[row_idx * 4 + 0, col_idx]
sample_X = np.mean(X[sample_idx], axis=0)
ax_wav.plot(sample_X)
ax_wav.set_xlim(0, len(sample_X))
ax_wav.get_xaxis().set_visible(False)
ax_wav.get_yaxis().set_visible(False)
# plot target
ax_ref = axes[row_idx * 4 + 1, col_idx]
sample_y = y[sample_idx]
ax_ref.plot(sample_y)
ax_ref.set_xlim(0, len(sample_y))
ax_ref.set_ylim(-1, sample_y.shape[1])
ax_ref.get_xaxis().set_visible(False)
ax_ref.get_yaxis().set_visible(False)
# plot prediction
ax_hyp = axes[row_idx * 4 + 2, col_idx]
sample_y_pred = y_pred[sample_idx]
ax_hyp.axvspan(0, warm_up_left, color="k", alpha=0.5, lw=0)
ax_hyp.axvspan(
num_frames - warm_up_right, num_frames, color="k", alpha=0.5, lw=0
)
ax_hyp.plot(sample_y_pred)
ax_hyp.set_ylim(-0.1, 1.1)
ax_hyp.set_xlim(0, len(sample_y))
ax_hyp.get_xaxis().set_visible(False)
# plot permutated prediction
ax_map = axes[row_idx * 4 + 3, col_idx]
sample_y_pred_map = permutated_y_pred[sample_idx]
ax_map.axvspan(0, warm_up_left, color="k", alpha=0.5, lw=0)
ax_map.axvspan(
num_frames - warm_up_right, num_frames, color="k", alpha=0.5, lw=0
)
ax_map.plot(sample_y_pred_map)
ax_map.set_ylim(-0.1, 1.1)
ax_map.set_xlim(0, len(sample_y))
plt.tight_layout()
self.model.logger.experiment.add_figure(
f"{self.ACRONYM}@val_samples", fig, self.model.current_epoch
)
plt.close(fig)
|
mit
|
mdscruggs/ga
|
ga/examples/travelling_salesman.py
|
1
|
6091
|
import math
import random
try:
import matplotlib.pyplot as plt
import matplotlib.animation as animation
plt.style.use('ggplot')
except ImportError:
plt = None
from ..algorithms import BaseGeneticAlgorithm
from ..chromosomes import ReorderingSetChromosome
from ..genes import BinaryGene
from ..translators import BinaryIntTranslator
class TravellingSalesmanGA(BaseGeneticAlgorithm):
def __init__(self, city_distances, *args, **kwargs):
"""
city_distances: 2-deep mapping of city_id -> city_id -> distance
"""
super().__init__(*args, **kwargs)
self.city_distances = city_distances
self.max_distance = max([max(subdict.values()) for subdict in city_distances.values()])
self.num_cities = len(self.city_distances)
def calc_distance(self, chromosome, pow=1):
# get list of city IDs
city_ids = self.translator.translate_chromosome(chromosome)
# compute distance travelled
tot_dist = 0
for i, start_city_id in enumerate(city_ids[:-1]):
end_city_id = city_ids[i + 1]
tot_dist += self.city_distances[start_city_id][end_city_id] ** pow
tot_dist += self.city_distances[city_ids[-1]][city_ids[0]] ** pow
return tot_dist
def eval_fitness(self, chromosome):
"""
Calculate the distance travelled by the salesman by converting
the solution/chromosome into a sequence of visited city IDs.
Penalty distance is added each time any of these conditions occur:
1. cities are visited multiple times
2. not all cities are visited
3. an invalid city ID is encountered
return: fitness value
"""
return -self.calc_distance(chromosome, pow=2)
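# Note: eval_fitness returns the negative sum of squared leg distances (pow=2), so
# shorter tours score higher and unusually long individual legs are penalised
# disproportionately compared with the plain total distance.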
def run(num_cities=20, num_chromosomes=20, generations=2500, plot=True):
# solve a simple travelling salesman problem
rs = random.randint(1, 1000000)
random.seed(100)
gene_length = -1
for i in range(1, num_cities + 1):
if 2 ** i >= num_cities:
gene_length = i + 1
break
assert gene_length >= 1
city_ids = list(range(1, num_cities + 1))
city_points = {city_id: (random.random() * 100, random.random() * 100) for city_id in city_ids}
city_distances = {}
for start_city_id in city_ids:
city_distances[start_city_id] = {}
x1, y1 = city_points[start_city_id]
for end_city_id in city_ids:
x2, y2 = city_points[end_city_id]
dist = math.sqrt((x2 - x1)**2 + (y2 - y1)**2)
city_distances[start_city_id][end_city_id] = dist
print("city distances:")
for start_city_id in city_ids:
for end_city_id in city_ids:
print("distance from", start_city_id, "to", end_city_id, "=", city_distances[start_city_id][end_city_id])
random.seed(rs)
chromosomes = []
for x in range(num_chromosomes):
genes = []
for city_id in city_ids:
dna = ('{:0' + str(gene_length) + 'd}').format(int(bin(city_id)[2:]))
g = BinaryGene(dna, name='city ' + str(x))
genes.append(g)
choices = [g.dna for g in genes]
c = ReorderingSetChromosome(genes, choices)
chromosomes.append(c)
ts_ga = TravellingSalesmanGA(city_distances, chromosomes,
translator=BinaryIntTranslator(),
abs_fit_weight=0, rel_fit_weight=1)
p_mutate = 0.10
p_cross = 0.50
best = ts_ga.run(generations, p_mutate, p_cross, elitist=True, refresh_after=generations/2)
best_city_ids = ts_ga.translator.translate_chromosome(best)
best_dist = ts_ga.calc_distance(best)
print("run took", ts_ga.run_time_s, "seconds")
print("best solution =", best_city_ids)
print("best distance =", best_dist)
if plot:
if plt:
# plot fitness progression
plt.plot([v for k, v in sorted(ts_ga.overall_fittest_fit.items())], label='run best')
plt.plot([v for k, v in sorted(ts_ga.generation_fittest_fit.items())], label='gen best')
plt.legend(loc='best')
plt.show()
fig, ax = plt.subplots()
def iter_generations():
for gen in ts_ga.new_fittest_generations:
yield gen
def animate(generation):
chromosome = ts_ga.generation_fittest[generation]
ax.clear()
x, y = [], []
for city_id, point in city_points.items():
x.append(point[0])
y.append(point[1])
ax.plot(x, y, marker='s', linestyle='', label='cities', alpha=0.6)
# plot optimal route
chrom_city_ids = ts_ga.translator.translate_chromosome(chromosome)
dist = round(ts_ga.calc_distance(chromosome), 2)
ax.set_title("generation " + str(generation) + "\ndistance = " + str(dist))
for i, start_city_id in enumerate(chrom_city_ids):
end_city_idx = i + 1
if end_city_idx == num_cities:
# distance from last city to first
end_city_idx = 0
end_city_id = chrom_city_ids[end_city_idx]
x1, y1 = city_points[start_city_id]
x2, y2 = city_points[end_city_id]
mid_x = (x2 - x1) / 2 + x1
mid_y = (y2 - y1) / 2 + y1
plt.arrow(x1, y1, x2 - x1, y2 - y1, head_width=1.5, fc='k', ec='k', alpha=0.7, linestyle='dotted', length_includes_head=True)
plt.text(mid_x, mid_y, str(i + 1))
ani = animation.FuncAnimation(fig, animate, iter_generations,
repeat=True, interval=1000, repeat_delay=12000)
plt.show()
else:
print("Did not plot example results because matplotlib not installed")
|
gpl-2.0
|
jstoxrocky/statsmodels
|
statsmodels/examples/ex_multivar_kde.py
|
34
|
1504
|
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d
import statsmodels.api as sm
"""
This example illustrates the nonparametric estimation of a
bivariate bi-modal distribution that is a mixture of two normal
distributions.
author: George Panterov
"""
if __name__ == '__main__':
np.random.seed(123456)
# generate the data
nobs = 500
BW = 'cv_ml'
mu1 = [3, 4]
mu2 = [6, 1]
cov1 = np.asarray([[1, 0.7], [0.7, 1]])
cov2 = np.asarray([[1, -0.7], [-0.7, 1]])
ix = np.random.uniform(size=nobs) > 0.5
V = np.random.multivariate_normal(mu1, cov1, size=nobs)
V[ix, :] = np.random.multivariate_normal(mu2, cov2, size=nobs)[ix, :]
x = V[:, 0]
y = V[:, 1]
dens = sm.nonparametric.KDEMultivariate(data=[x, y], var_type='cc', bw=BW,
defaults=sm.nonparametric.EstimatorSettings(efficient=True))
supportx = np.linspace(min(x), max(x), 60)
supporty = np.linspace(min(y), max(y), 60)
X, Y = np.meshgrid(supportx, supporty)
edat = np.column_stack([X.ravel(), Y.ravel()])
Z = dens.pdf(edat).reshape(X.shape)
# plot
fig = plt.figure(1)
ax = fig.gca(projection='3d')
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.jet,
linewidth=0, antialiased=False)
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.figure(2)
plt.imshow(Z)
plt.show()
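    # Optional check (a sketch, not in the original example): the bandwidths
    # selected by the 'cv_ml' cross-validation are available afterwards, e.g.
    #     >>> print(dens.bw)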
|
bsd-3-clause
|
rgommers/statsmodels
|
docs/sphinxext/numpy_ext/plot_directive.py
|
65
|
20399
|
"""
A special directive for generating a matplotlib plot.
.. warning::
This is a hacked version of plot_directive.py from Matplotlib.
It's very much subject to change!
Usage
-----
Can be used like this::
.. plot:: examples/example.py
.. plot::
import matplotlib.pyplot as plt
plt.plot([1,2,3], [4,5,6])
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
The content is interpreted as doctest formatted if it has a line starting
with ``>>>``.
The ``plot`` directive supports the options
    format : {'python', 'doctest'}
        Specify the format of the input
    include-source : bool
        Whether to display the source code. Default can be changed in conf.py
as well as the ``image`` directive options ``alt``, ``height``, ``width``,
``scale``, ``align``, ``class``.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_pre_code
Code that should be executed before each plot.
plot_basedir
Base directory, to which plot:: file names are relative to.
    (If None or empty, file names are relative to the directory where
the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen.
plot_html_show_formats
Whether to show links to the files in HTML.
TODO
----
* Refactor Latex output; now it's plain images, but it would be nice
to make them appear side-by-side, or in floats.
"""
import sys, os, glob, shutil, imp, warnings, cStringIO, re, textwrap, traceback
import sphinx
import warnings
warnings.warn("A plot_directive module is also available under "
"matplotlib.sphinxext; expect this numpydoc.plot_directive "
"module to be deprecated after relevant features have been "
"integrated there.",
FutureWarning, stacklevel=2)
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def setup(app):
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
app.add_config_value('plot_pre_code', '', True)
app.add_config_value('plot_include_source', False, True)
app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
app.add_config_value('plot_basedir', None, True)
app.add_config_value('plot_html_show_formats', True, True)
app.add_directive('plot', plot_directive, True, (0, 1, False),
**plot_directive_options)
#------------------------------------------------------------------------------
# plot:: directive
#------------------------------------------------------------------------------
from docutils.parsers.rst import directives
from docutils import nodes
def plot_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return run(arguments, content, options, state_machine, state, lineno)
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
# no argument given, assume used as a flag
return True
elif arg.strip().lower() in ('no', '0', 'false'):
return False
elif arg.strip().lower() in ('yes', '1', 'true'):
return True
else:
raise ValueError('"%s" unknown boolean' % arg)
def _option_format(arg):
    return directives.choice(arg, ('python', 'doctest'))
def _option_align(arg):
return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
"right"))
plot_directive_options = {'alt': directives.unchanged,
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'scale': directives.nonnegative_int,
'align': _option_align,
'class': directives.class_option,
'include-source': _option_boolean,
'format': _option_format,
}
#------------------------------------------------------------------------------
# Generating output
#------------------------------------------------------------------------------
from docutils import nodes, utils
try:
# Sphinx depends on either Jinja or Jinja2
import jinja2
def format_template(template, **kw):
return jinja2.Template(template).render(**kw)
except ImportError:
import jinja
def format_template(template, **kw):
return jinja.from_string(template, **kw)
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.png
{%- for option in options %}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{% endfor %}
{{ only_latex }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.pdf
{% endfor %}
"""
class ImageFile(object):
def __init__(self, basename, dirname):
self.basename = basename
self.dirname = dirname
self.formats = []
def filename(self, format):
return os.path.join(self.dirname, "%s.%s" % (self.basename, format))
def filenames(self):
return [self.filename(fmt) for fmt in self.formats]
def run(arguments, content, options, state_machine, state, lineno):
if arguments and content:
raise RuntimeError("plot:: directive can't have both args and content")
document = state_machine.document
config = document.settings.env.config
options.setdefault('include-source', config.plot_include_source)
# determine input
rst_file = document.attributes['source']
rst_dir = os.path.dirname(rst_file)
if arguments:
if not config.plot_basedir:
source_file_name = os.path.join(rst_dir,
directives.uri(arguments[0]))
else:
source_file_name = os.path.join(setup.confdir, config.plot_basedir,
directives.uri(arguments[0]))
code = open(source_file_name, 'r').read()
output_base = os.path.basename(source_file_name)
else:
source_file_name = rst_file
code = textwrap.dedent("\n".join(map(str, content)))
counter = document.attributes.get('_plot_counter', 0) + 1
document.attributes['_plot_counter'] = counter
base, ext = os.path.splitext(os.path.basename(source_file_name))
output_base = '%s-%d.py' % (base, counter)
base, source_ext = os.path.splitext(output_base)
if source_ext in ('.py', '.rst', '.txt'):
output_base = base
else:
source_ext = ''
# ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
output_base = output_base.replace('.', '-')
# is it in doctest format?
is_doctest = contains_doctest(code)
if options.has_key('format'):
if options['format'] == 'python':
is_doctest = False
else:
is_doctest = True
# determine output directory name fragment
source_rel_name = relpath(source_file_name, setup.confdir)
source_rel_dir = os.path.dirname(source_rel_name)
while source_rel_dir.startswith(os.path.sep):
source_rel_dir = source_rel_dir[1:]
# build_dir: where to place output files (temporarily)
build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
'plot_directive',
source_rel_dir)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
# output_dir: final location in the builder's directory
dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
source_rel_dir))
# how to link to files from the RST file
dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
source_rel_dir).replace(os.path.sep, '/')
build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
source_link = dest_dir_link + '/' + output_base + source_ext
# make figures
try:
results = makefig(code, source_file_name, build_dir, output_base,
config)
errors = []
except PlotError, err:
reporter = state.memo.reporter
sm = reporter.system_message(
2, "Exception occurred in plotting %s: %s" % (output_base, err),
line=lineno)
results = [(code, [])]
errors = [sm]
# generate output restructuredtext
total_lines = []
for j, (code_piece, images) in enumerate(results):
if options['include-source']:
if is_doctest:
lines = ['']
lines += [row.rstrip() for row in code_piece.split('\n')]
else:
lines = ['.. code-block:: python', '']
lines += [' %s' % row.rstrip()
for row in code_piece.split('\n')]
source_code = "\n".join(lines)
else:
source_code = ""
opts = [':%s: %s' % (key, val) for key, val in options.items()
if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
only_html = ".. only:: html"
only_latex = ".. only:: latex"
if j == 0:
src_link = source_link
else:
src_link = None
result = format_template(
TEMPLATE,
dest_dir=dest_dir_link,
build_dir=build_dir_link,
source_link=src_link,
multi_image=len(images) > 1,
only_html=only_html,
only_latex=only_latex,
options=opts,
images=images,
source_code=source_code,
html_show_formats=config.plot_html_show_formats)
total_lines.extend(result.split("\n"))
total_lines.extend("\n")
if total_lines:
state_machine.insert_input(total_lines, source=source_file_name)
# copy image files to builder's output directory
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
for code_piece, images in results:
for img in images:
for fn in img.filenames():
shutil.copyfile(fn, os.path.join(dest_dir,
os.path.basename(fn)))
# copy script (if necessary)
if source_file_name == rst_file:
target_name = os.path.join(dest_dir, output_base + source_ext)
f = open(target_name, 'w')
f.write(unescape_doctest(code))
f.close()
return errors
#------------------------------------------------------------------------------
# Run code and capture figures
#------------------------------------------------------------------------------
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.image as image
from matplotlib import _pylab_helpers
import exceptions
def contains_doctest(text):
try:
# check if it's valid Python as-is
compile(text, '<string>', 'exec')
return False
except SyntaxError:
pass
r = re.compile(r'^\s*>>>', re.M)
m = r.search(text)
return bool(m)
def unescape_doctest(text):
"""
Extract code from a piece of text, which contains either Python code
or doctests.
"""
if not contains_doctest(text):
return text
code = ""
for line in text.split("\n"):
m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
if m:
code += m.group(2) + "\n"
elif line.strip():
code += "# " + line.strip() + "\n"
else:
code += "\n"
return code
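# Illustrative example (a sketch, not part of the original module): doctest
# lines are unescaped and any explanatory prose is turned into comments:
#     >>> unescape_doctest(">>> x = 1\n>>> print(x)\nthe value of x")
#     'x = 1\nprint(x)\n# the value of x\n'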
def split_code_at_show(text):
"""
Split code at plt.show()
"""
parts = []
is_doctest = contains_doctest(text)
part = []
for line in text.split("\n"):
if (not is_doctest and line.strip() == 'plt.show()') or \
(is_doctest and line.strip() == '>>> plt.show()'):
part.append(line)
parts.append("\n".join(part))
part = []
else:
part.append(line)
if "\n".join(part).strip():
parts.append("\n".join(part))
return parts
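# Illustrative example (a sketch, not part of the original module): a script
# with two plt.show() calls is split into two code pieces, each ending with
# its own plt.show() line; any trailing code would become a final piece.
#     >>> split_code_at_show("plt.plot([1, 2])\nplt.show()\nplt.hist(x)\nplt.show()")
#     ['plt.plot([1, 2])\nplt.show()', 'plt.hist(x)\nplt.show()']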
class PlotError(RuntimeError):
pass
def run_code(code, code_path, ns=None):
# Change the working directory to the directory of the example, so
# it can get at its data files, if any.
pwd = os.getcwd()
old_sys_path = list(sys.path)
if code_path is not None:
dirname = os.path.abspath(os.path.dirname(code_path))
os.chdir(dirname)
sys.path.insert(0, dirname)
# Redirect stdout
stdout = sys.stdout
sys.stdout = cStringIO.StringIO()
# Reset sys.argv
old_sys_argv = sys.argv
sys.argv = [code_path]
try:
try:
code = unescape_doctest(code)
if ns is None:
ns = {}
if not ns:
exec setup.config.plot_pre_code in ns
exec code in ns
except (Exception, SystemExit), err:
raise PlotError(traceback.format_exc())
finally:
os.chdir(pwd)
sys.argv = old_sys_argv
sys.path[:] = old_sys_path
sys.stdout = stdout
return ns
#------------------------------------------------------------------------------
# Generating figures
#------------------------------------------------------------------------------
def out_of_date(original, derived):
"""
Returns True if derivative is out-of-date wrt original,
both of which are full file paths.
"""
return (not os.path.exists(derived)
or os.stat(derived).st_mtime < os.stat(original).st_mtime)
def makefig(code, code_path, output_dir, output_base, config):
"""
Run a pyplot script *code* and save the images under *output_dir*
with file names derived from *output_base*
"""
# -- Parse format list
default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50}
formats = []
for fmt in config.plot_formats:
if isinstance(fmt, str):
formats.append((fmt, default_dpi.get(fmt, 80)))
elif type(fmt) in (tuple, list) and len(fmt)==2:
formats.append((str(fmt[0]), int(fmt[1])))
else:
raise PlotError('invalid image format "%r" in plot_formats' % fmt)
# -- Try to determine if all images already exist
code_pieces = split_code_at_show(code)
# Look for single-figure output files first
all_exists = True
img = ImageFile(output_base, output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
if all_exists:
return [(code, [img])]
# Then look for multi-figure output files
results = []
all_exists = True
for i, code_piece in enumerate(code_pieces):
images = []
for j in xrange(1000):
img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
# assume that if we have one, we have them all
if not all_exists:
all_exists = (j > 0)
break
images.append(img)
if not all_exists:
break
results.append((code_piece, images))
if all_exists:
return results
# -- We didn't find the files, so build them
results = []
ns = {}
for i, code_piece in enumerate(code_pieces):
# Clear between runs
plt.close('all')
# Run code
run_code(code_piece, code_path, ns)
# Collect images
images = []
fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
for j, figman in enumerate(fig_managers):
if len(fig_managers) == 1 and len(code_pieces) == 1:
img = ImageFile(output_base, output_dir)
else:
img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
output_dir)
images.append(img)
for format, dpi in formats:
try:
figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
except exceptions.BaseException, err:
raise PlotError(traceback.format_exc())
img.formats.append(format)
# Results
results.append((code_piece, images))
return results
#------------------------------------------------------------------------------
# Relative pathnames
#------------------------------------------------------------------------------
try:
from os.path import relpath
except ImportError:
# Copied from Python 2.7
if 'posix' in sys.builtin_module_names:
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
from os.path import sep, curdir, join, abspath, commonprefix, \
pardir
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
# Work out how much of the filepath is shared by start and path.
i = len(commonprefix([start_list, path_list]))
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
elif 'nt' in sys.builtin_module_names:
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
from os.path import sep, curdir, join, abspath, commonprefix, \
pardir, splitunc
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
if start_list[0].lower() != path_list[0].lower():
unc_path, rest = splitunc(path)
unc_start, rest = splitunc(start)
if bool(unc_path) ^ bool(unc_start):
raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
% (path, start))
else:
raise ValueError("path is on drive %s, start on drive %s"
% (path_list[0], start_list[0]))
# Work out how much of the filepath is shared by start and path.
for i in range(min(len(start_list), len(path_list))):
if start_list[i].lower() != path_list[i].lower():
break
else:
i += 1
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
else:
raise RuntimeError("Unsupported platform (no relpath available!)")
|
bsd-3-clause
|
zCFD/zutil
|
zutil/post/post.py
|
1
|
38113
|
""" Helper functions for accessing Paraview functionality
.. moduleauthor:: Zenotech Ltd
"""
from tqdm import tqdm
from IPython.display import HTML, Javascript, display
import uuid
import csv
from zutil import analysis
import time
import math
from zutil import mag
import json
from zutil import rotate_vector
from paraview.simple import *
from builtins import object
from past.utils import old_div
from builtins import range
from builtins import str
from future import standard_library
standard_library.install_aliases()
# from paraview.vtk.util import numpy_support
try:
from paraview.vtk.dataset_adapter import numpyTovtkDataArray
from paraview.vtk.dataset_adapter import Table
from paraview.vtk.dataset_adapter import PolyData
from paraview.vtk.dataset_adapter import DataSetAttributes
from paraview.vtk.dataset_adapter import DataSet
from paraview.vtk.dataset_adapter import CompositeDataSet
from paraview.vtk.dataset_adapter import PointSet
except:
from paraview.vtk.numpy_interface.dataset_adapter import numpyTovtkDataArray
from paraview.vtk.numpy_interface.dataset_adapter import Table
from paraview.vtk.numpy_interface.dataset_adapter import PolyData
from paraview.vtk.numpy_interface.dataset_adapter import DataSetAttributes
from paraview.vtk.numpy_interface.dataset_adapter import DataSet
from paraview.vtk.numpy_interface.dataset_adapter import CompositeDataSet
from paraview.vtk.numpy_interface.dataset_adapter import PointSet
def sum_and_zone_filter_array(input, array_name, ignore_zone, filter=None):
sum = [0.0, 0.0, 0.0]
p = input.GetCellData().GetArray(array_name)
z = input.GetCellData().GetArray("zone")
numCells = input.GetNumberOfCells()
for x in range(numCells):
if len(ignore_zone) == 0:
v = p.GetTuple(x)
for i in range(0, 3):
sum[i] += v[i]
else:
zone = z.GetValue(x)
if zone not in ignore_zone:
v = p.GetTuple(x)
if filter is None or filter.test(input, x):
# print 'Zone: %i'%(zone)
for i in range(0, 3):
sum[i] += v[i]
return sum
def sum_and_zone_filter(input, array_name, ignore_zone, filter=None):
sum = [0.0, 0.0, 0.0]
if input.IsA("vtkMultiBlockDataSet"):
iter = input.NewIterator()
iter.UnRegister(None)
iter.InitTraversal()
while not iter.IsDoneWithTraversal():
cur_input = iter.GetCurrentDataObject()
v = sum_and_zone_filter_array(cur_input, array_name, ignore_zone, filter)
for i in range(0, 3):
sum[i] += v[i]
iter.GoToNextItem()
else:
sum = sum_and_zone_filter_array(input, array_name, ignore_zone, filter)
return sum
class GeomFilterLT(object):
def __init__(self, val, idx):
#
self.val = val
self.idx = idx
def test(self, input, x):
centre = input.GetCellData().GetArray("centre").GetTuple(x)
if centre[self.idx] < self.val:
return True
else:
return False
class GeomFilterGT(object):
def __init__(self, val, idx):
#
self.val = val
self.idx = idx
def test(self, input, x):
centre = input.GetCellData().GetArray("centre").GetTuple(x)
if centre[self.idx] >= self.val:
return True
else:
return False
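# Illustrative usage (a sketch, not part of the original module; names are only
# examples): sum the pressure force over cells whose centre lies at x >= 10,
# skipping zones 1 and 2, for a vtk dataset obtained via servermanager.Fetch:
#     >>> keep_downstream = GeomFilterGT(10.0, 0)
#     >>> sum_and_zone_filter(dataset, "pressureforce", [1, 2], keep_downstream)
#     [fx, fy, fz]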
def calc_force_from_file(
file_name, ignore_zone, half_model=False, filter=None, **kwargs
):
""" Calculates the pressure and friction force
This function requires that the VTK file contains three cell data arrays
called pressureforce, frictionforce and zone
Args:
file_name (str): the VTK file name including path
ignore_zone (list): List of zones to be ignored
Kwargs:
        half_model (bool): Does the data represent only half of the model
filter (function):
Returns:
float, float. pressure force and friction force
"""
wall = PVDReader(FileName=file_name)
wall.UpdatePipeline()
    return calc_force(wall, ignore_zone, half_model, filter, **kwargs)
def calc_force_wall(file_root, ignore_zone, half_model=False, filter=None, **kwargs):
wall = PVDReader(FileName=file_root + "_wall.pvd")
wall.UpdatePipeline()
force = calc_force(wall, ignore_zone, half_model, filter, **kwargs)
Delete(wall)
del wall
return force
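# Illustrative usage (a sketch, not part of the original module; the case name,
# zones and angles are only examples and need a live ParaView/pvpython session):
#     >>> pforce, fforce = calc_force_wall("wing_case", ignore_zone=[],
#     ...                                  half_model=True, alpha=2.5)
#     >>> lift = pforce[2] + fforce[2]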
def calc_force(surface_data, ignore_zone, half_model=False, filter=None, **kwargs):
alpha = 0.0
if "alpha" in kwargs:
alpha = kwargs["alpha"]
beta = 0.0
if "beta" in kwargs:
beta = kwargs["beta"]
sum_client = servermanager.Fetch(surface_data)
pforce = sum_and_zone_filter(sum_client, "pressureforce", ignore_zone, filter)
fforce = sum_and_zone_filter(sum_client, "frictionforce", ignore_zone, filter)
pforce = rotate_vector(pforce, alpha, beta)
fforce = rotate_vector(fforce, alpha, beta)
if half_model:
for i in range(0, 3):
pforce[i] *= 2.0
fforce[i] *= 2.0
del sum_client
return pforce, fforce
def calc_moment(surface_data, ignore_zone, half_model=False, filter=None, **kwargs):
alpha = 0.0
if "alpha" in kwargs:
alpha = kwargs["alpha"]
beta = 0.0
if "beta" in kwargs:
beta = kwargs["beta"]
if "ref_pt" in kwargs:
sum_client = servermanager.Fetch(surface_data)
if sum_client.GetCellData().GetArray("pressuremomentx"):
pmoment = sum_and_zone_filter(
sum_client, "pressuremomentx", ignore_zone, filter
)
fmoment = sum_and_zone_filter(
sum_client, "frictionmomentx", ignore_zone, filter
)
pmoment = rotate_vector(pmoment, alpha, beta)
fmoment = rotate_vector(fmoment, alpha, beta)
# fforce = rotate_vector(fforce,alpha,beta)
if half_model:
# This is only valid for X-Z plane reflection
pmoment[0] += -pmoment[0]
pmoment[1] += pmoment[1]
pmoment[2] += -pmoment[2]
fmoment[0] += -fmoment[0]
fmoment[1] += fmoment[1]
fmoment[2] += -fmoment[2]
return pmoment, fmoment
else:
sum_client = servermanager.Fetch(surface_data)
pmoment = sum_and_zone_filter(sum_client, "pressuremoment", ignore_zone, filter)
fmoment = sum_and_zone_filter(sum_client, "frictionmoment", ignore_zone, filter)
pmoment = rotate_vector(pmoment, alpha, beta)
fmoment = rotate_vector(fmoment, alpha, beta)
# fforce = rotate_vector(fforce,alpha,beta)
if half_model:
# This is only valid for X-Z plane reflection
pmoment[0] += -pmoment[0]
pmoment[1] += pmoment[1]
pmoment[2] += -pmoment[2]
fmoment[0] += -fmoment[0]
fmoment[1] += fmoment[1]
fmoment[2] += -fmoment[2]
return pmoment, fmoment
def calc_lift_centre_of_action(force, moment, ref_point):
# longitudinal centre xs0 at zs0
# spanwise centre ys0 at zs0
# residual Mz moment (Mx=My=0) mzs0
xs0 = ref_point[0] - moment[1] / force[2]
ys0 = ref_point[1] + moment[0] / force[2]
zs0 = ref_point[2]
mzs0 = moment[2] - force[1] * (xs0 - ref_point[0]) + force[0] * (ys0 - ref_point[1])
return (xs0, ys0, zs0), mzs0
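# Worked example (a sketch, not part of the original module): for a pure lift
# force of (0, 0, 1000), moment = (200, -500, 50) about ref_point = (10, 0, 0):
#     xs0 = 10 - (-500 / 1000) = 10.5
#     ys0 = 0 + 200 / 1000 = 0.2
#     zs0 = 0
#     mzs0 = 50 - 0 * (10.5 - 10) + 0 * (0.2 - 0) = 50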
def calc_drag_centre_of_action(force, moment, ref_point):
# longitudinal centre xs0 at zs0
# spanwise centre ys0 at zs0
# residual Mz moment (Mx=My=0) mzs0
zs0 = ref_point[2] + moment[1] / force[0]
ys0 = ref_point[1] - moment[2] / force[0]
xs0 = ref_point[0]
# moment[2] - force[1]*(xs0-ref_point[0]) + force[0]*(ys0-ref_point[1])
mzs0 = 0.0
return (xs0, ys0, zs0), mzs0
def move_moment_ref_point(moment, ref_point, new_ref_point):
pass
def get_span(wall):
""" Returns the min and max y ordinate
Args:
wall (vtkMultiBlockDataSet): The input surface
Returns:
(float,float). Min y, Max y
"""
Calculator1 = Calculator(Input=wall)
Calculator1.AttributeType = "Point Data"
Calculator1.Function = "coords.jHat"
Calculator1.ResultArrayName = "ypos"
Calculator1.UpdatePipeline()
ymin = MinMax(Input=Calculator1)
ymin.Operation = "MIN"
ymin.UpdatePipeline()
ymin_client = servermanager.Fetch(ymin)
min_pos = ymin_client.GetPointData().GetArray("ypos").GetValue(0)
ymax = MinMax(Input=Calculator1)
ymax.Operation = "MAX"
ymax.UpdatePipeline()
ymax_client = servermanager.Fetch(ymax)
max_pos = ymax_client.GetPointData().GetArray("ypos").GetValue(0)
Delete(ymin)
Delete(ymax)
Delete(Calculator1)
return [min_pos, max_pos]
def get_chord(slice, rotate_geometry=[0.0, 0.0, 0.0]):
""" Returns the min and max x ordinate
Args:
        slice (vtkMultiBlockDataSet): The input slice or surface
Returns:
(float,float). Min x, Max x
"""
transform = Transform(Input=slice, Transform="Transform")
transform.Transform.Scale = [1.0, 1.0, 1.0]
transform.Transform.Translate = [0.0, 0.0, 0.0]
transform.Transform.Rotate = rotate_geometry
transform.UpdatePipeline()
Calculator1 = Calculator(Input=transform)
Calculator1.AttributeType = "Point Data"
Calculator1.Function = "coords.iHat"
Calculator1.ResultArrayName = "xpos"
Calculator1.UpdatePipeline()
xmin = MinMax(Input=Calculator1)
xmin.Operation = "MIN"
xmin.UpdatePipeline()
xmin_client = servermanager.Fetch(xmin)
min_pos = xmin_client.GetPointData().GetArray("xpos").GetValue(0)
xmax = MinMax(Input=Calculator1)
xmax.Operation = "MAX"
xmax.UpdatePipeline()
xmax_client = servermanager.Fetch(xmax)
max_pos = xmax_client.GetPointData().GetArray("xpos").GetValue(0)
Delete(xmin)
Delete(xmax)
Delete(Calculator1)
Delete(transform)
return [min_pos, max_pos]
def get_chord_spanwise(slice):
Calculator1 = Calculator(Input=slice)
Calculator1.AttributeType = "Point Data"
Calculator1.Function = "coords.jHat"
Calculator1.ResultArrayName = "ypos"
Calculator1.UpdatePipeline()
ymin = MinMax(Input=Calculator1)
ymin.Operation = "MIN"
ymin.UpdatePipeline()
ymin_client = servermanager.Fetch(ymin)
min_pos = ymin_client.GetPointData().GetArray("ypos").GetValue(0)
ymax = MinMax(Input=Calculator1)
ymax.Operation = "MAX"
ymax.UpdatePipeline()
ymax_client = servermanager.Fetch(ymax)
max_pos = ymax_client.GetPointData().GetArray("ypos").GetValue(0)
Delete(ymin)
Delete(ymax)
Delete(Calculator1)
return [min_pos, max_pos]
def get_monitor_data(file, monitor_name, var_name):
""" Return the _report file data corresponding to a monitor point and variable name
"""
monitor = CSVReader(FileName=[file])
monitor.HaveHeaders = 1
monitor.MergeConsecutiveDelimiters = 1
monitor.UseStringDelimiter = 0
monitor.DetectNumericColumns = 1
monitor.FieldDelimiterCharacters = " "
monitor.UpdatePipeline()
monitor_client = servermanager.Fetch(monitor)
table = Table(monitor_client)
data = table.RowData
names = list(data.keys())
num_var = len(names) - 2
if str(monitor_name) + "_" + str(var_name) in names:
index = names.index(str(monitor_name) + "_" + str(var_name))
return (data[names[0]], data[names[index]])
else:
print(
"POST.PY: MONITOR POINT: "
+ str(monitor_name)
+ "_"
+ str(var_name)
+ " NOT FOUND"
)
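# Illustrative usage (a sketch, not part of the original module; the file,
# monitor and variable names are only examples):
#     >>> steps, cl = get_monitor_data("wing_report.csv", "monitor1", "cl")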
def residual_plot(file, pl):
""" Plot the _report file
"""
l2norm = CSVReader(FileName=[file])
l2norm.HaveHeaders = 1
l2norm.MergeConsecutiveDelimiters = 1
l2norm.UseStringDelimiter = 0
l2norm.DetectNumericColumns = 1
l2norm.FieldDelimiterCharacters = " "
l2norm.UpdatePipeline()
l2norm_client = servermanager.Fetch(l2norm)
table = Table(l2norm_client)
data = table.RowData
names = list(data.keys())
num_var = len(names) - 2
num_rows = (old_div((num_var - 1), 4)) + 1
fig = pl.figure(figsize=(40, 10 * num_rows), dpi=100, facecolor="w", edgecolor="k")
fig.suptitle(file, fontsize=40, fontweight="bold")
for i in range(1, num_var + 1):
var_name = names[i]
ax = fig.add_subplot(num_rows, 4, i)
if "rho" in var_name:
ax.set_yscale("log")
ax.set_ylabel("l2norm " + var_name, multialignment="center")
else:
ax.set_ylabel(var_name, multialignment="center")
ax.grid(True)
ax.set_xlabel("Cycles")
ax.plot(data[names[0]], data[names[i]], color="r", label=names[i])
def for_each(surface, func, **kwargs):
if surface.IsA("vtkMultiBlockDataSet"):
iter = surface.NewIterator()
iter.UnRegister(None)
iter.InitTraversal()
while not iter.IsDoneWithTraversal():
cur_input = iter.GetCurrentDataObject()
# numCells = cur_input.GetNumberOfCells()
numPts = cur_input.GetNumberOfPoints()
if numPts > 0:
calc = DataSet(cur_input)
pts = PointSet(cur_input)
func(calc, pts, **kwargs)
iter.GoToNextItem()
else:
calc = DataSet(surface)
pts = PointSet(surface)
func(calc, pts, **kwargs)
def cp_profile_wall_from_file(file_root, slice_normal, slice_origin, **kwargs):
wall = PVDReader(FileName=file_root + "_wall.pvd")
clean = CleantoGrid(Input=wall)
clean.UpdatePipeline()
inp = servermanager.Fetch(clean)
if inp.IsA("vtkMultiBlockDataSet"):
inp = MergeBlocks(Input=clean)
else:
inp = clean
Delete(wall)
del wall
profile = cp_profile(inp, slice_normal, slice_origin, **kwargs)
Delete(clean)
del clean
Delete(inp)
del inp
return profile
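# Illustrative usage (a sketch, not part of the original module; the case name,
# slice definition and array names are only examples and require a live
# ParaView session). The optional 'func' callback receives the wrapped DataSet
# and PointSet for each block of the sorted slice:
#     >>> def collect(calc, pts, **kwargs):
#     ...     chord = calc.GetPointData().GetArray("chord")
#     >>> forces = cp_profile_wall_from_file("wing_case", [0, 1, 0], [0, 2.5, 0],
#     ...                                    func=collect, alpha=2.5)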
def cp_profile_wall_from_file_span(file_root, slice_normal, slice_origin, **kwargs):
wall = PVDReader(FileName=file_root + "_wall.pvd")
clean = CleantoGrid(Input=wall)
clean.UpdatePipeline()
inp = servermanager.Fetch(clean)
if inp.IsA("vtkMultiBlockDataSet"):
inp = MergeBlocks(Input=clean)
else:
inp = clean
Delete(wall)
del wall
profile = cp_profile_span(inp, slice_normal, slice_origin, **kwargs)
Delete(clean)
del clean
Delete(inp)
del inp
return profile
def cp_profile(surface, slice_normal, slice_origin, **kwargs):
alpha = 0.0
if "alpha" in kwargs:
alpha = kwargs["alpha"]
beta = 0.0
if "beta" in kwargs:
beta = kwargs["beta"]
time_average = False
if "time_average" in kwargs:
time_average = kwargs["time_average"]
rotate_geometry = [0.0, 0.0, 0.0]
if "rotate_geometry" in kwargs:
rotate_geometry = kwargs["rotate_geometry"]
clean = CleantoGrid(Input=surface)
clean.UpdatePipeline()
point_data = CellDatatoPointData(Input=clean)
point_data.PassCellData = 1
Delete(clean)
del clean
if "filter" in kwargs:
filter_zones = kwargs["filter"]
calc_str = "".join("(zone={:d})|".format(i) for i in filter_zones)
filter_data = Calculator(Input=point_data)
filter_data.AttributeType = "Cell Data"
filter_data.Function = "if (" + calc_str[:-1] + ", 1, 0)"
filter_data.ResultArrayName = "zonefilter"
filter_data.UpdatePipeline()
Delete(point_data)
del point_data
point_data = Threshold(Input=filter_data)
point_data.Scalars = ["CELLS", "zonefilter"]
point_data.ThresholdRange = [1.0, 1.0]
point_data.UpdatePipeline()
Delete(filter_data)
del filter_data
surf = ExtractSurface(Input=point_data)
surf_normals = GenerateSurfaceNormals(Input=surf)
surf_normals.UpdatePipeline()
Delete(surf)
del surf
Delete(point_data)
del point_data
point_data = surf_normals
slice = Slice(Input=point_data, SliceType="Plane")
slice.SliceType.Normal = slice_normal
slice.SliceType.Origin = slice_origin
slice.UpdatePipeline()
Delete(point_data)
del point_data
if time_average:
temporal = TemporalStatistics(Input=slice)
temporal.ComputeMaximum = 0
temporal.ComputeStandardDeviation = 0
temporal.ComputeMinimum = 0
temporal.UpdatePipeline()
Delete(slice)
del slice
slice = temporal
offset = get_chord(slice, rotate_geometry)
transform = Transform(Input=slice, Transform="Transform")
transform.Transform.Scale = [1.0, 1.0, 1.0]
transform.Transform.Translate = [0.0, 0.0, 0.0]
transform.Transform.Rotate = rotate_geometry
transform.UpdatePipeline()
if "chord_func" in kwargs:
pass
else:
chord_calc = Calculator(Input=transform)
chord_calc.AttributeType = "Point Data"
chord_calc.Function = (
"(coords.iHat - " + str(offset[0]) + ")/" + str(offset[1] - offset[0])
)
chord_calc.ResultArrayName = "chord"
# Attempt to calculate forces
pforce = [0.0, 0.0, 0.0]
fforce = [0.0, 0.0, 0.0]
pmoment = [0.0, 0.0, 0.0]
fmoment = [0.0, 0.0, 0.0]
pmomentx = [0.0, 0.0, 0.0]
fmomentx = [0.0, 0.0, 0.0]
pmomenty = [0.0, 0.0, 0.0]
fmomenty = [0.0, 0.0, 0.0]
pmomentz = [0.0, 0.0, 0.0]
fmomentz = [0.0, 0.0, 0.0]
sum = MinMax(Input=slice)
sum.Operation = "SUM"
sum.UpdatePipeline()
sum_client = servermanager.Fetch(sum)
if sum_client.GetCellData().GetArray("pressureforce"):
pforce = sum_client.GetCellData().GetArray("pressureforce").GetTuple(0)
pforce = rotate_vector(pforce, alpha, beta)
if sum_client.GetCellData().GetArray("frictionforce"):
fforce = sum_client.GetCellData().GetArray("frictionforce").GetTuple(0)
fforce = rotate_vector(fforce, alpha, beta)
"""
# Add sectional force integration
sorted_line = PlotOnSortedLines(Input=chord_calc)
sorted_line.UpdatePipeline()
sorted_line = servermanager.Fetch(sorted_line)
cp_array = sorted_line.GetCellData().GetArray("cp")
for i in range(0,len(cp_array)):
sorted_line.GetPointData().GetArray("X")
pass
"""
if sum_client.GetCellData().GetArray("pressuremoment"):
pmoment = sum_client.GetCellData().GetArray("pressuremoment").GetTuple(0)
pmoment = rotate_vector(pmoment, alpha, beta)
if sum_client.GetCellData().GetArray("frictionmoment"):
fmoment = sum_client.GetCellData().GetArray("frictionmoment").GetTuple(0)
fmoment = rotate_vector(fmoment, alpha, beta)
if sum_client.GetCellData().GetArray("pressuremomentx"):
pmomentx = sum_client.GetCellData().GetArray("pressuremomentx").GetTuple(0)
pmomentx = rotate_vector(pmomentx, alpha, beta)
if sum_client.GetCellData().GetArray("frictionmomentx"):
fmomentx = sum_client.GetCellData().GetArray("frictionmomentx").GetTuple(0)
fmomentx = rotate_vector(fmomentx, alpha, beta)
if "func" in kwargs:
sorted_line = PlotOnSortedLines(Input=chord_calc)
sorted_line.UpdatePipeline()
extract_client = servermanager.Fetch(sorted_line)
for_each(extract_client, **kwargs)
Delete(chord_calc)
del chord_calc
Delete(sum)
del sum
del sum_client
Delete(slice)
del slice
Delete(sorted_line)
del sorted_line
del extract_client
return {
"pressure force": pforce,
"friction force": fforce,
"pressure moment": pmoment,
"friction moment": fmoment,
}
def cp_profile_span(surface, slice_normal, slice_origin, **kwargs):
alpha = 0.0
if "alpha" in kwargs:
alpha = kwargs["alpha"]
beta = 0.0
if "beta" in kwargs:
beta = kwargs["beta"]
point_data = CellDatatoPointData(Input=surface)
point_data.PassCellData = 1
clip = Clip(Input=point_data, ClipType="Plane")
clip.ClipType.Normal = [0.0, 1.0, 0.0]
clip.ClipType.Origin = [0.0, 0.0, 0.0]
clip.UpdatePipeline()
slice = Slice(Input=clip, SliceType="Plane")
slice.SliceType.Normal = slice_normal
slice.SliceType.Origin = slice_origin
slice.UpdatePipeline()
offset = get_chord_spanwise(slice)
    # define the cut and make sure it is the one you want before computing
    # the spanwise chord coordinate below
chord_calc = Calculator(Input=slice)
chord_calc.AttributeType = "Point Data"
chord_calc.Function = (
"(coords.jHat - " + str(offset[0]) + ")/" + str(offset[1] - offset[0])
)
chord_calc.ResultArrayName = "chord"
sum = MinMax(Input=slice)
sum.Operation = "SUM"
sum.UpdatePipeline()
sum_client = servermanager.Fetch(sum)
pforce = sum_client.GetCellData().GetArray("pressureforce").GetTuple(0)
fforce = sum_client.GetCellData().GetArray("frictionforce").GetTuple(0)
pforce = rotate_vector(pforce, alpha, beta)
fforce = rotate_vector(fforce, alpha, beta)
if "func" in kwargs:
sorted_line = PlotOnSortedLines(Input=chord_calc)
sorted_line.UpdatePipeline()
extract_client = servermanager.Fetch(sorted_line)
for_each(extract_client, **kwargs)
return {"pressure force": pforce, "friction force": fforce}
def cf_profile_wall_from_file(file_root, slice_normal, slice_origin, **kwargs):
wall = PVDReader(FileName=file_root + "_wall.pvd")
clean = CleantoGrid(Input=wall)
clean.UpdatePipeline()
inp = servermanager.Fetch(clean)
if inp.IsA("vtkMultiBlockDataSet"):
inp = MergeBlocks(Input=clean)
else:
inp = clean
Delete(wall)
del wall
profile = cf_profile(inp, slice_normal, slice_origin, **kwargs)
Delete(clean)
del clean
Delete(inp)
del inp
return profile
def cf_profile(surface, slice_normal, slice_origin, **kwargs):
alpha = 0.0
if "alpha" in kwargs:
alpha = kwargs["alpha"]
beta = 0.0
if "beta" in kwargs:
beta = kwargs["beta"]
point_data = CellDatatoPointData(Input=surface)
point_data.PassCellData = 1
slice = Slice(Input=point_data, SliceType="Plane")
slice.SliceType.Normal = slice_normal
slice.SliceType.Origin = slice_origin
slice.UpdatePipeline()
offset = get_chord(slice)
chord_calc = Calculator(Input=slice)
chord_calc.AttributeType = "Point Data"
chord_calc.Function = (
"(coords.iHat - " + str(offset[0]) + ")/" + str(offset[1] - offset[0])
)
chord_calc.ResultArrayName = "chord"
cf_calc = Calculator(Input=chord_calc)
cf_calc.AttributeType = "Point Data"
cf_calc.Function = "mag(cf)"
cf_calc.ResultArrayName = "cfmag"
sum = MinMax(Input=slice)
sum.Operation = "SUM"
sum.UpdatePipeline()
sum_client = servermanager.Fetch(sum)
pforce = sum_client.GetCellData().GetArray("pressureforce").GetTuple(0)
fforce = sum_client.GetCellData().GetArray("frictionforce").GetTuple(0)
pforce = rotate_vector(pforce, alpha, beta)
fforce = rotate_vector(fforce, alpha, beta)
if "func" in kwargs:
sorted_line = PlotOnSortedLines(Input=cf_calc)
sorted_line.UpdatePipeline()
extract_client = servermanager.Fetch(sorted_line)
for_each(extract_client, **kwargs)
return {"pressure force": pforce, "friction force": fforce}
def get_csv_data(filename, header=False, remote=False, delim=" "):
""" Get csv data
"""
if remote:
theory = CSVReader(FileName=[filename])
theory.HaveHeaders = 0
if header:
theory.HaveHeaders = 1
theory.MergeConsecutiveDelimiters = 1
theory.UseStringDelimiter = 0
theory.DetectNumericColumns = 1
theory.FieldDelimiterCharacters = delim
theory.UpdatePipeline()
theory_client = servermanager.Fetch(theory)
table = Table(theory_client)
data = table.RowData
else:
import pandas as pd
if not header:
data = pd.read_csv(filename, sep=delim, header=None)
else:
data = pd.read_csv(filename, sep=delim)
return data
def get_fw_csv_data(filename, widths, header=False, remote=False, **kwargs):
if remote:
theory = CSVReader(FileName=[filename])
theory.HaveHeaders = 0
theory.MergeConsecutiveDelimiters = 1
theory.UseStringDelimiter = 0
theory.DetectNumericColumns = 1
theory.FieldDelimiterCharacters = " "
theory.UpdatePipeline()
theory_client = servermanager.Fetch(theory)
table = Table(theory_client)
data = table.RowData
else:
import pandas as pd
if not header:
data = pd.read_fwf(filename, sep=" ", header=None, widths=widths, **kwargs)
else:
            data = pd.read_fwf(filename, sep=" ", widths=widths, **kwargs)
return data
def screenshot(wall):
# position camera
view = GetActiveView()
if not view:
        # Inside the ParaView UI a View already exists; otherwise create one.
view = CreateRenderView()
view.CameraViewUp = [0, 0, 1]
view.CameraFocalPoint = [0, 0, 0]
view.CameraViewAngle = 45
view.CameraPosition = [5, 0, 0]
# draw the object
Show()
# set the background color
view.Background = [1, 1, 1] # white
# set image size
view.ViewSize = [200, 300] # [width, height]
dp = GetDisplayProperties()
# set point color
dp.AmbientColor = [1, 0, 0] # red
# set surface color
    dp.DiffuseColor = [0, 1, 0]  # green
# set point size
dp.PointSize = 2
# set representation
dp.Representation = "Surface"
Render()
# save screenshot
WriteImage("test.png")
def sum_array(input, array_name):
sum = [0.0, 0.0, 0.0]
p = input.GetCellData().GetArray(array_name)
numCells = input.GetNumberOfCells()
for x in range(numCells):
v = p.GetTuple(x)
for i in range(0, 3):
sum[i] += v[i]
return sum
def get_case_file():
with cd(remote_dir):
get(case_name + ".py", "%(path)s")
def cat_case_file(remote_dir, case_name):
with cd(remote_dir):
with hide("output", "running", "warnings"), settings(warn_only=True):
# cmd = 'cat '+case_name+'.py'
import io
contents = io.StringIO()
get(case_name + ".py", contents)
            # operate on 'contents' like a file object here, e.g. print(contents.getvalue())
return contents.getvalue()
def cat_status_file(remote_dir, case_name):
with cd(remote_dir), hide("output", "running", "warnings"), settings(
warn_only=True
):
# cmd = 'cat '+case_name+'_status.txt'
import io
contents = io.StringIO()
result = get(case_name + "_status.txt", contents)
if result.succeeded:
            # operate on 'contents' like a file object here, e.g. print(contents.getvalue())
return contents.getvalue()
else:
return None
def get_case_parameters_str(case_name, **kwargs):
# global remote_data, data_dir, data_host, remote_server_auto, paraview_cmd
_remote_dir = analysis.data.data_dir
if "data_dir" in kwargs:
_remote_dir = kwargs["data_dir"]
_remote_host = analysis.data.data_host
if "data_host" in kwargs:
_remote_host = kwargs["data_host"]
_remote_data = analysis.data.remote_data
if "remote_data" in kwargs:
_remote_data = kwargs["remote_data"]
if _remote_data:
env.use_ssh_config = True
env.host_string = _remote_host
case_file_str = cat_case_file(_remote_dir, case_name)
return case_file_str
else:
try:
# Get contents of local file
with open(_remote_dir + "/" + case_name + ".py") as f:
case_file_str = f.read()
if case_file_str is not None:
# print status_file_str
return case_file_str
else:
print("WARNING: " + case_name + ".py file not found")
return None
except:
print("WARNING: " + case_name + ".py file not found")
return None
def get_case_parameters(case_name, **kwargs):
case_file_str = get_case_parameters_str(case_name, **kwargs)
namespace = {}
exec(case_file_str, namespace)
return namespace["parameters"]
def get_status_dict(case_name, **kwargs):
# global remote_data, data_dir, data_host, remote_server_auto, paraview_cmd
_remote_data = analysis.data.remote_data
if "remote_data" in kwargs:
_remote_data = kwargs["remote_data"]
_remote_dir = analysis.data.data_dir
if "data_dir" in kwargs:
_remote_dir = kwargs["data_dir"]
if _remote_data:
_remote_host = analysis.data.data_host
if "data_host" in kwargs:
_remote_host = kwargs["data_host"]
env.use_ssh_config = True
env.host_string = _remote_host
status_file_str = cat_status_file(_remote_dir, case_name)
if status_file_str is not None:
# print status_file_str
return json.loads(status_file_str)
else:
print("WARNING: " + case_name + "_status.txt file not found")
return None
else:
try:
# Get contents of local file
with open(_remote_dir + "/" + case_name + "_status.txt") as f:
status_file_str = f.read()
if status_file_str is not None:
# print status_file_str
return json.loads(status_file_str)
else:
print("WARNING: " + case_name + "_status.txt file not found")
return None
except Exception as e:
print("WARNING: " + case_name + "_status.txt file not found")
print("Caught exception " + str(e))
return None
def get_num_procs(case_name, **kwargs):
# remote_host,remote_dir,case_name):
status = get_status_dict(case_name, **kwargs)
if status is not None:
if "num processor" in status:
return status["num processor"]
else:
return None
else:
print("status file not found")
def get_case_root(case_name, num_procs=None):
if num_procs is None:
num_procs = get_num_procs(case_name)
return case_name + "_P" + str(num_procs) + "_OUTPUT/" + case_name
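# Illustrative example (a sketch, not part of the original module):
#     >>> get_case_root("wing", 64)
#     'wing_P64_OUTPUT/wing'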
def get_case_report(case):
return case + "_report.csv"
def print_html_parameters(parameters):
reference = parameters["reference"]
# material = parameters['material']
conditions = parameters[reference]
mach = 0.0
speed = 0.0
if "Mach" in conditions["V"]:
mach = conditions["V"]["Mach"]
speed = 0.0
else:
speed = mag(conditions["V"]["vector"])
mach = 0.0
if "Reynolds No" in conditions:
reynolds = conditions["Reynolds No"]
else:
reynolds = "undefined"
if "Reference Length" in conditions:
reflength = conditions["Reference Length"]
else:
reflength = "undefined"
import string
html_template = """<table>
<tr><td>pressure</td><td>$pressure</td></tr>
<tr><td>temperature</td><td>$temperature</td></tr>
<tr><td>Reynolds No</td><td>$reynolds</td></tr>
<tr><td>Ref length</td><td>$reflength</td></tr>
<tr><td>Speed</td><td>$speed</td></tr>
<tr><td>Mach No</td><td>$mach</td></tr>
</table>"""
html_output = string.Template(html_template)
return html_output.substitute(
{
"pressure": conditions["pressure"],
"temperature": conditions["temperature"],
"reynolds": reynolds,
"reflength": reflength,
"speed": speed,
"mach": mach,
}
)
class ProgressBar(object):
def __init__(self):
self.pbar = tqdm(total=100)
def __iadd__(self, v):
self.pbar.update(v)
return self
def complete(self):
self.pbar.close()
def update(self, i):
self.pbar.update(i)
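# Illustrative usage (a sketch, not part of the original module):
#     >>> pbar = ProgressBar()
#     >>> pbar += 40          # advance the bar by 40%
#     >>> pbar.update(60)
#     >>> pbar.complete()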
# remote_data = True
# data_host = 'user@server'
# data_dir = 'data'
# remote_server_auto = True
# paraview_cmd = 'mpiexec pvserver'
# paraview_home = '/usr/local/bin/'
# job_queue = 'default'
# job_tasks = 1
# job_ntaskpernode = 1
# job_project = 'default'
def data_location_form_html(**kwargs):
global remote_data, data_dir, data_host, remote_server_auto, paraview_cmd
global job_queue, job_tasks, job_ntaskpernode, job_project
if "data_dir" in kwargs:
data_dir = kwargs["data_dir"]
if "paraview_cmd" in kwargs:
paraview_cmd = kwargs["paraview_cmd"]
if "data_host" in kwargs:
data_host = kwargs["data_host"]
remote_data_checked = ""
if remote_data:
remote_data_checked = 'checked="checked"'
remote_server_auto_checked = ""
if remote_server_auto:
remote_server_auto_checked = 'checked="checked"'
remote_cluster_checked = ""
job_queue = "default"
job_tasks = 1
job_ntaskpernode = 1
job_project = "default"
input_form = """
<div style="background-color:gainsboro; border:solid black; width:640px; padding:20px;">
<label style="width:22%;display:inline-block">Remote Data</label>
<input type="checkbox" id="remote_data" value="remote" {remote_data_checked}><br>
<label style="width:22%;display:inline-block">Data Directory</label>
<input style="width:75%;" type="text" id="data_dir" value="{data_dir}"><br>
<label style="width:22%;display:inline-block">Data Host</label>
<input style="width:75%;" type="text" id="data_host" value="{data_host}"><br>
<label style="width:22%;display:inline-block">Remote Server Auto</label>
<input type="checkbox" id="remote_server_auto" value="remote_auto" {remote_server_auto_checked}><br>
<label style="width:22%;display:inline-block">Paraview Cmd </label>
<input style="width:75%;" type="text" id="paraview_cmd" value="{paraview_cmd}"><br>
<label style="width:22%;display:inline-block">Remote Cluster</label>
<input type="checkbox" id="remote_cluster" value="remote_cluster" {remote_cluster_checked}><br>
<label style="width:22%;display:inline-block">Job Queue </label>
<input style="width:75%;" type="text" id="job_queue" value="{job_queue}"><br>
<label style="width:22%;display:inline-block">Job Tasks </label>
<input style="width:75%;" type="text" id="job_tasks" value="{job_tasks}"><br>
<label style="width:22%;display:inline-block">Job Tasks per Node </label>
<input style="width:75%;" type="text" id="job_ntaskpernode" value="{job_ntaskpernode}"><br>
<label style="width:22%;display:inline-block">Job Project </label>
<input style="width:75%;" type="text" id="job_project" value="{job_project}"><br>
<button onclick="apply()">Apply</button>
</div>
"""
javascript = """
<script type="text/Javascript">
function apply(){
var remote_data = ($('input#remote_data').is(':checked') ? 'True' : 'False');
var data_dir = $('input#data_dir').val();
var data_host = $('input#data_host').val();
var remote_server_auto = ($('input#remote_server_auto').is(':checked') ? 'True' : 'False');
var paraview_cmd = $('input#paraview_cmd').val();
var remote_cluster = ($('input#remote_cluster').is(':checked') ? 'True' : 'False');
var kernel = IPython.notebook.kernel;
// Send data dir to ipython
var command = "from zutil import post; post.data_dir = '" + data_dir + "'";
console.log("Executing Command: " + command);
kernel.execute(command);
// Send data host to ipython
var command = "from zutil import post; post.data_host = '" + data_host + "'";
console.log("Executing Command: " + command);
kernel.execute(command);
// Send remote server flag to ipython
var command = "from zutil import post; post.remote_server_auto = " + remote_server_auto;
console.log("Executing Command: " + command);
kernel.execute(command);
// Send paraview command to ipython
var command = "from zutil import post; post.paraview_cmd = '" + paraview_cmd + "'";
console.log("Executing Command: " + command);
kernel.execute(command);
// Send remote data flag to ipython
var command = "from zutil import post; post.remote_data = " + remote_data ;
console.log("Executing Command: " + command);
kernel.execute(command);
// Set paraview command to none if not using remote server
var command = "from zutil import post; if not post.remote_server_auto: post.paraview_cmd=None"
console.log("Executing Command: " + command);
kernel.execute(command);
// Set data to local host for local data
var command = "from zutil import post; if not post.post.remote_data: post.data_host='localhost'; post.paraview_cmd=None"
console.log("Executing Command: " + command);
kernel.execute(command);
if(remote_cluster == 'True'){
// Set cluster job info
//var command = "from zutil import post; post.jo";
}
}
</script>
"""
return HTML(
input_form.format(
data_dir=data_dir,
data_host=data_host,
paraview_cmd=paraview_cmd,
remote_data_checked=remote_data_checked,
remote_server_auto_checked=remote_server_auto_checked,
remote_cluster_checked=remote_cluster_checked,
job_queue=job_queue,
job_tasks=job_tasks,
job_ntaskpernode=job_ntaskpernode,
job_project=job_project,
)
+ javascript
)
|
mit
|
jesanabriah/ejercicios
|
proyecto-final/entropia.py
|
2
|
3149
|
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import math
# Uncomment to set a fixed random seed
#np.random.seed(19680801)
# Theoretical maximum entropy value
S_MAX = 4
# Number of iterations
N = 9000
# Number of walkers
NCAM = 400
# Step size
PAS = 1
# Number of cells; please always use an even number
NCEL = 8
# Grid size / 2
TAM = 100
# Grid limits
xmin, ymin, xmax, ymax= -TAM, -TAM, TAM, TAM
# Cell size
TAMCEL = 2.0*TAM/NCEL
# particle positions
dpos = {}
#prob = np.zeros((NCEL, NCEL))
entropia = 0
hindex = []
hentropia = []
def setTAM(newtam):
global TAM, xmin, ymin, xmax, ymax, TAMCEL
    # Grid size / 2
    TAM = newtam
    # Grid limits
    xmin, ymin, xmax, ymax= -TAM, -TAM, TAM, TAM
    # Cell size
TAMCEL = 2.0*TAM/NCEL
def getCellPos(pos):
pos = pos / TAMCEL + NCEL/2
return int(math.floor(pos[0])), int(math.floor(pos[1]))
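# Worked example (a sketch, not part of the original script): with TAM = 100
# and NCEL = 8 the cell size TAMCEL is 25, so
#     getCellPos(np.array([0, 0]))     -> (4, 4)   (origin lands in the centre cells)
#     getCellPos(np.array([-100, 99])) -> (0, 7)   (bottom-left / top-edge cell)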
# initialization function: plot the background of each frame
def init():
global entropia
entropia = 0
    # Initialize the walkers
for cam in range(NCAM):
dpos[cam] = np.array([0, 0])
# animation function. This is called sequentially
def animate():
global entropia
global dpos
    # Initialize the probability matrix
prob = np.zeros((NCEL, NCEL))
x, y = [], []
for cam in range(NCAM):
x.append(dpos[cam][0])
y.append(dpos[cam][1])
c = getCellPos(dpos[cam])
prob[c] = prob[c] + 1
    # Compute the entropy of the system
entropia = 0
for i in range(NCEL):
for j in range(NCEL):
try:
entropia = entropia + prob[i][j] * math.log(prob[i][j]/NCAM)/NCAM
            # Handle the log-of-zero case through the exception; cheaper than an if check
except ValueError:
pass
entropia = -entropia
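    # The value above is S = -sum_ij p_ij * ln(p_ij) with p_ij = n_ij / NCAM,
    # whose maximum for a uniform spread over NCEL*NCEL = 64 cells is
    # ln(64) ~= 4.16; S_MAX = 4 sits just below that, so the while loop
    # further down can terminate once the walkers are roughly evenly spread.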
    # Initialize the random step draws
ddata = {}
for cam in range(NCAM):
data = np.random.randint(3, size=(1, 2))
data = data - 1
data = data * PAS
ddata[cam] = data[0]
    # Compute the walkers' moves
for cam in range(NCAM):
pos_temp = dpos[cam] + ddata[cam]
if pos_temp[0] < xmax and pos_temp[1] < ymax and pos_temp[0] > xmin and pos_temp[1]> ymin:
dpos[cam] = pos_temp
# dimensions and times
dvals = []
tvals = []
# Run the random walk for each grid size
for num in range(10, 101, 5):
setTAM(num)
init()
n = 0
while entropia < S_MAX:
animate()
n = n + 1
dvals.append(num * 2)
tvals.append(n)
f = np.poly1d([1, 0, 0])
dvals2 = f(dvals)
# np.polyfit of degree 1 returns (a, b) for a*x + b
a, b = np.polyfit(dvals2, tvals, 1, rcond=None, full=False, w=None, cov=False)
f = np.poly1d([a, 0, b])
tvals_est = f(dvals)
plt.plot(dvals, tvals, 'k.')
plt.plot(dvals, tvals_est, 'r-')
plt.title(r"$\tau=${:.2f}".format(a) + r"$\cdot x^2+${:.2f}".format(b))
plt.xlabel(u"Dimension")
plt.ylabel("Time to equilibrium")
plt.grid()
#plt.xlim( 0, 200 )
#plt.ylim( 0, 4000 )
plt.show()
|
gpl-3.0
|
QISKit/qiskit-sdk-py
|
qiskit/visualization/bloch.py
|
1
|
27046
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
"""Bloch sphere"""
__all__ = ['Bloch']
import os
import numpy as np
from matplotlib import get_backend
import matplotlib.pyplot as plt # pylint: disable=import-error
from matplotlib.patches import FancyArrowPatch # pylint: disable=import-error
from mpl_toolkits.mplot3d import (Axes3D, proj3d) # pylint: disable=import-error
class Arrow3D(FancyArrowPatch):
"""Makes a fancy arrow"""
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
x_s, y_s, _ = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((x_s[0], y_s[0]), (x_s[1], y_s[1]))
FancyArrowPatch.draw(self, renderer)
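# Illustrative usage (a sketch, not part of the original module; styling values
# are only examples):
#     >>> arrow = Arrow3D([0, 0.5], [0, 0.5], [0, 0.7], mutation_scale=20,
#     ...                 lw=2, arrowstyle='-|>', color='b')
#     >>> ax3d.add_artist(arrow)   # ax3d is a matplotlib 3D Axes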
class Bloch():
"""Class for plotting data on the Bloch sphere. Valid data can be
either points, vectors, or qobj objects.
Attributes:
axes (instance):
User supplied Matplotlib axes for Bloch sphere animation.
fig (instance):
User supplied Matplotlib Figure instance for plotting Bloch sphere.
font_color (str):
Color of font used for Bloch sphere labels.
font_size (int):
Size of font used for Bloch sphere labels.
frame_alpha (float):
Sets transparency of Bloch sphere frame.
frame_color (str):
Color of sphere wireframe.
frame_width (int):
Width of wireframe.
point_color (list):
List of colors for Bloch sphere point markers to cycle through.
i.e. By default, points 0 and 4 will both be blue ('b').
point_marker (list):
List of point marker shapes to cycle through.
point_size (list):
List of point marker sizes. Note, not all point markers look
the same size when plotted!
sphere_alpha (float):
Transparency of Bloch sphere itself.
sphere_color (str):
Color of Bloch sphere.
figsize (list):
Figure size of Bloch sphere plot. Best to have both numbers the same;
otherwise you will have a Bloch sphere that looks like a football.
vector_color (list):
List of vector colors to cycle through.
vector_width (int):
Width of displayed vectors.
vector_style (str):
Vector arrowhead style (from matplotlib's arrow style).
vector_mutation (int):
Width of vectors arrowhead.
view (list):
Azimuthal and Elevation viewing angles.
xlabel (list):
List of strings corresponding to +x and -x axes labels, respectively.
xlpos (list):
Positions of +x and -x labels respectively.
ylabel (list):
List of strings corresponding to +y and -y axes labels, respectively.
ylpos (list):
Positions of +y and -y labels respectively.
zlabel (list):
List of strings corresponding to +z and -z axes labels, respectively.
zlpos (list):
Positions of +z and -z labels respectively.
"""
def __init__(self, fig=None, axes=None, view=None, figsize=None,
background=False):
# Figure and axes
self._ext_fig = False
if fig is not None:
self._ext_fig = True
self.fig = fig
self._ext_axes = False
if axes is not None:
self._ext_fig = True
self._ext_axes = True
self.axes = axes
# Background axes, default = False
self.background = background
# The size of the figure in inches, default = [5,5].
self.figsize = figsize if figsize else [5, 5]
# Azimuthal and Elevation viewing angles, default = [-60,30].
self.view = view if view else [-60, 30]
# Color of Bloch sphere, default = #FFDDDD
self.sphere_color = '#FFDDDD'
# Transparency of Bloch sphere, default = 0.2
self.sphere_alpha = 0.2
# Color of wireframe, default = 'gray'
self.frame_color = 'gray'
# Width of wireframe, default = 1
self.frame_width = 1
# Transparency of wireframe, default = 0.2
self.frame_alpha = 0.2
# Labels for x-axis (in LaTex), default = ['$x$', '']
self.xlabel = ['$x$', '']
# Position of x-axis labels, default = [1.2, -1.2]
self.xlpos = [1.2, -1.2]
# Labels for y-axis (in LaTex), default = ['$y$', '']
self.ylabel = ['$y$', '']
        # Position of y-axis labels, default = [1.2, -1.2]
self.ylpos = [1.2, -1.2]
# Labels for z-axis (in LaTex),
        # default = [r'$\left|0\right>$', r'$\left|1\right>$']
self.zlabel = [r'$\left|0\right>$', r'$\left|1\right>$']
# Position of z-axis labels, default = [1.2, -1.2]
self.zlpos = [1.2, -1.2]
# ---font options---
# Color of fonts, default = 'black'
self.font_color = 'black'
# Size of fonts, default = 20
self.font_size = 20
# ---vector options---
        # List of colors for Bloch vectors to cycle through
self.vector_color = ['#dc267f', '#648fff', '#fe6100', '#785ef0',
'#ffb000']
#: Width of Bloch vectors, default = 5
self.vector_width = 5
#: Style of Bloch vectors, default = '-|>' (or 'simple')
self.vector_style = '-|>'
#: Sets the width of the vectors arrowhead
self.vector_mutation = 20
# ---point options---
        # List of colors for Bloch point markers, default = ['b', 'r', 'g', '#CC6600']
self.point_color = ['b', 'r', 'g', '#CC6600']
# Size of point markers, default = 25
self.point_size = [25, 32, 35, 45]
# Shape of point markers, default = ['o','^','d','s']
self.point_marker = ['o', 's', 'd', '^']
# ---data lists---
# Data for point markers
self.points = []
# Data for Bloch vectors
self.vectors = []
# Data for annotations
self.annotations = []
# Number of times sphere has been saved
self.savenum = 0
# Style of points, 'm' for multiple colors, 's' for single color
self.point_style = []
# status of rendering
self._rendered = False
def set_label_convention(self, convention):
"""Set x, y and z labels according to one of conventions.
Args:
convention (str):
One of the following:
- "original"
- "xyz"
- "sx sy sz"
- "01"
- "polarization jones"
- "polarization jones letters"
see also: http://en.wikipedia.org/wiki/Jones_calculus
- "polarization stokes"
see also: http://en.wikipedia.org/wiki/Stokes_parameters
Raises:
Exception: If convention is not valid.
"""
ketex = "$\\left.|%s\\right\\rangle$"
# \left.| is on purpose, so that every ket has the same size
if convention == "original":
self.xlabel = ['$x$', '']
self.ylabel = ['$y$', '']
self.zlabel = ['$\\left|0\\right>$', '$\\left|1\\right>$']
elif convention == "xyz":
self.xlabel = ['$x$', '']
self.ylabel = ['$y$', '']
self.zlabel = ['$z$', '']
elif convention == "sx sy sz":
self.xlabel = ['$s_x$', '']
self.ylabel = ['$s_y$', '']
self.zlabel = ['$s_z$', '']
elif convention == "01":
self.xlabel = ['', '']
self.ylabel = ['', '']
self.zlabel = ['$\\left|0\\right>$', '$\\left|1\\right>$']
elif convention == "polarization jones":
self.xlabel = [ketex % "\\nearrow\\hspace{-1.46}\\swarrow",
ketex % "\\nwarrow\\hspace{-1.46}\\searrow"]
self.ylabel = [ketex % "\\circlearrowleft", ketex %
"\\circlearrowright"]
self.zlabel = [ketex % "\\leftrightarrow", ketex % "\\updownarrow"]
elif convention == "polarization jones letters":
self.xlabel = [ketex % "D", ketex % "A"]
self.ylabel = [ketex % "L", ketex % "R"]
self.zlabel = [ketex % "H", ketex % "V"]
elif convention == "polarization stokes":
self.ylabel = ["$\\nearrow\\hspace{-1.46}\\swarrow$",
"$\\nwarrow\\hspace{-1.46}\\searrow$"]
self.zlabel = ["$\\circlearrowleft$", "$\\circlearrowright$"]
self.xlabel = ["$\\leftrightarrow$", "$\\updownarrow$"]
else:
raise Exception("No such convention.")
def __str__(self):
string = ""
string += "Bloch data:\n"
string += "-----------\n"
string += "Number of points: " + str(len(self.points)) + "\n"
string += "Number of vectors: " + str(len(self.vectors)) + "\n"
string += "\n"
string += "Bloch sphere properties:\n"
string += "------------------------\n"
string += "font_color: " + str(self.font_color) + "\n"
string += "font_size: " + str(self.font_size) + "\n"
string += "frame_alpha: " + str(self.frame_alpha) + "\n"
string += "frame_color: " + str(self.frame_color) + "\n"
string += "frame_width: " + str(self.frame_width) + "\n"
string += "point_color: " + str(self.point_color) + "\n"
string += "point_marker: " + str(self.point_marker) + "\n"
string += "point_size: " + str(self.point_size) + "\n"
string += "sphere_alpha: " + str(self.sphere_alpha) + "\n"
string += "sphere_color: " + str(self.sphere_color) + "\n"
string += "figsize: " + str(self.figsize) + "\n"
string += "vector_color: " + str(self.vector_color) + "\n"
string += "vector_width: " + str(self.vector_width) + "\n"
string += "vector_style: " + str(self.vector_style) + "\n"
string += "vector_mutation: " + str(self.vector_mutation) + "\n"
string += "view: " + str(self.view) + "\n"
string += "xlabel: " + str(self.xlabel) + "\n"
string += "xlpos: " + str(self.xlpos) + "\n"
string += "ylabel: " + str(self.ylabel) + "\n"
string += "ylpos: " + str(self.ylpos) + "\n"
string += "zlabel: " + str(self.zlabel) + "\n"
string += "zlpos: " + str(self.zlpos) + "\n"
return string
def clear(self):
"""Resets Bloch sphere data sets to empty.
"""
self.points = []
self.vectors = []
self.point_style = []
self.annotations = []
def add_points(self, points, meth='s'):
"""Add a list of data points to bloch sphere.
Args:
points (array_like):
Collection of data points.
meth (str):
Type of points to plot, use 'm' for multicolored, 'l' for points
connected with a line.
"""
if not isinstance(points[0], (list, np.ndarray)):
points = [[points[0]], [points[1]], [points[2]]]
points = np.array(points)
if meth == 's':
if len(points[0]) == 1:
pnts = np.array([[points[0][0]],
[points[1][0]], [points[2][0]]])
pnts = np.append(pnts, points, axis=1)
else:
pnts = points
self.points.append(pnts)
self.point_style.append('s')
elif meth == 'l':
self.points.append(points)
self.point_style.append('l')
else:
self.points.append(points)
self.point_style.append('m')
def add_vectors(self, vectors):
"""Add a list of vectors to Bloch sphere.
Args:
vectors (array_like):
Array with vectors of unit length or smaller.
"""
if isinstance(vectors[0], (list, np.ndarray)):
for vec in vectors:
self.vectors.append(vec)
else:
self.vectors.append(vectors)
def add_annotation(self, state_or_vector, text, **kwargs):
"""Add a text or LaTeX annotation to Bloch sphere,
parametrized by a qubit state or a vector.
Args:
state_or_vector (array_like):
Position for the annotation.
Qobj of a qubit or a vector of 3 elements.
text (str):
Annotation text.
You can use LaTeX, but remember to use raw string
e.g. r"$\\langle x \\rangle$"
or escape backslashes
e.g. "$\\\\langle x \\\\rangle$".
**kwargs:
Options as for mplot3d.axes3d.text, including:
fontsize, color, horizontalalignment, verticalalignment.
Raises:
Exception: If input not array_like or tuple.
"""
if isinstance(state_or_vector, (list, np.ndarray, tuple)) \
and len(state_or_vector) == 3:
vec = state_or_vector
else:
raise Exception("Position needs to be specified by a qubit " +
"state or a 3D vector.")
self.annotations.append({'position': vec,
'text': text,
'opts': kwargs})
def make_sphere(self):
"""
Plots Bloch sphere and data sets.
"""
self.render()
def render(self, title=''):
"""
        Render the Bloch sphere and its data sets on the given figure and axes.
"""
if self._rendered:
self.axes.clear()
self._rendered = True
# Figure instance for Bloch sphere plot
if not self._ext_fig:
self.fig = plt.figure(figsize=self.figsize)
if not self._ext_axes:
self.axes = Axes3D(self.fig, azim=self.view[0], elev=self.view[1])
if self.background:
self.axes.clear()
self.axes.set_xlim3d(-1.3, 1.3)
self.axes.set_ylim3d(-1.3, 1.3)
self.axes.set_zlim3d(-1.3, 1.3)
else:
self.plot_axes()
self.axes.set_axis_off()
self.axes.set_xlim3d(-0.7, 0.7)
self.axes.set_ylim3d(-0.7, 0.7)
self.axes.set_zlim3d(-0.7, 0.7)
self.axes.grid(False)
self.plot_back()
self.plot_points()
self.plot_vectors()
self.plot_front()
self.plot_axes_labels()
self.plot_annotations()
self.axes.set_title(title, fontsize=self.font_size, y=1.08)
def plot_back(self):
"""back half of sphere"""
u_angle = np.linspace(0, np.pi, 25)
v_angle = np.linspace(0, np.pi, 25)
x_dir = np.outer(np.cos(u_angle), np.sin(v_angle))
y_dir = np.outer(np.sin(u_angle), np.sin(v_angle))
z_dir = np.outer(np.ones(u_angle.shape[0]), np.cos(v_angle))
self.axes.plot_surface(x_dir, y_dir, z_dir, rstride=2, cstride=2,
color=self.sphere_color, linewidth=0,
alpha=self.sphere_alpha)
# wireframe
self.axes.plot_wireframe(x_dir, y_dir, z_dir, rstride=5, cstride=5,
color=self.frame_color,
alpha=self.frame_alpha)
# equator
self.axes.plot(1.0 * np.cos(u_angle), 1.0 * np.sin(u_angle), zs=0, zdir='z',
lw=self.frame_width, color=self.frame_color)
self.axes.plot(1.0 * np.cos(u_angle), 1.0 * np.sin(u_angle), zs=0, zdir='x',
lw=self.frame_width, color=self.frame_color)
def plot_front(self):
"""front half of sphere"""
u_angle = np.linspace(-np.pi, 0, 25)
v_angle = np.linspace(0, np.pi, 25)
x_dir = np.outer(np.cos(u_angle), np.sin(v_angle))
y_dir = np.outer(np.sin(u_angle), np.sin(v_angle))
z_dir = np.outer(np.ones(u_angle.shape[0]), np.cos(v_angle))
self.axes.plot_surface(x_dir, y_dir, z_dir, rstride=2, cstride=2,
color=self.sphere_color, linewidth=0,
alpha=self.sphere_alpha)
# wireframe
self.axes.plot_wireframe(x_dir, y_dir, z_dir, rstride=5, cstride=5,
color=self.frame_color,
alpha=self.frame_alpha)
# equator
self.axes.plot(1.0 * np.cos(u_angle), 1.0 * np.sin(u_angle),
zs=0, zdir='z', lw=self.frame_width,
color=self.frame_color)
self.axes.plot(1.0 * np.cos(u_angle), 1.0 * np.sin(u_angle),
zs=0, zdir='x', lw=self.frame_width,
color=self.frame_color)
def plot_axes(self):
"""axes"""
span = np.linspace(-1.0, 1.0, 2)
self.axes.plot(span, 0 * span, zs=0, zdir='z', label='X',
lw=self.frame_width, color=self.frame_color)
self.axes.plot(0 * span, span, zs=0, zdir='z', label='Y',
lw=self.frame_width, color=self.frame_color)
self.axes.plot(0 * span, span, zs=0, zdir='y', label='Z',
lw=self.frame_width, color=self.frame_color)
def plot_axes_labels(self):
"""axes labels"""
opts = {'fontsize': self.font_size,
'color': self.font_color,
'horizontalalignment': 'center',
'verticalalignment': 'center'}
self.axes.text(0, -self.xlpos[0], 0, self.xlabel[0], **opts)
self.axes.text(0, -self.xlpos[1], 0, self.xlabel[1], **opts)
self.axes.text(self.ylpos[0], 0, 0, self.ylabel[0], **opts)
self.axes.text(self.ylpos[1], 0, 0, self.ylabel[1], **opts)
self.axes.text(0, 0, self.zlpos[0], self.zlabel[0], **opts)
self.axes.text(0, 0, self.zlpos[1], self.zlabel[1], **opts)
for item in (self.axes.w_xaxis.get_ticklines() +
self.axes.w_xaxis.get_ticklabels()):
item.set_visible(False)
for item in (self.axes.w_yaxis.get_ticklines() +
self.axes.w_yaxis.get_ticklabels()):
item.set_visible(False)
for item in (self.axes.w_zaxis.get_ticklines() +
self.axes.w_zaxis.get_ticklabels()):
item.set_visible(False)
def plot_vectors(self):
"""Plot vector"""
# -X and Y data are switched for plotting purposes
for k in range(len(self.vectors)):
xs3d = self.vectors[k][1] * np.array([0, 1])
ys3d = -self.vectors[k][0] * np.array([0, 1])
zs3d = self.vectors[k][2] * np.array([0, 1])
color = self.vector_color[np.mod(k, len(self.vector_color))]
if self.vector_style == '':
# simple line style
self.axes.plot(xs3d, ys3d, zs3d,
zs=0, zdir='z', label='Z',
lw=self.vector_width, color=color)
else:
# decorated style, with arrow heads
arr = Arrow3D(xs3d, ys3d, zs3d,
mutation_scale=self.vector_mutation,
lw=self.vector_width,
arrowstyle=self.vector_style,
color=color)
self.axes.add_artist(arr)
def plot_points(self):
"""Plot points"""
# -X and Y data are switched for plotting purposes
for k in range(len(self.points)):
num = len(self.points[k][0])
dist = [np.sqrt(self.points[k][0][j] ** 2 +
self.points[k][1][j] ** 2 +
self.points[k][2][j] ** 2) for j in range(num)]
if any(abs(dist - dist[0]) / dist[0] > 1e-12):
# combine arrays so that they can be sorted together
zipped = list(zip(dist, range(num)))
zipped.sort() # sort rates from lowest to highest
dist, indperm = zip(*zipped)
indperm = np.array(indperm)
else:
indperm = np.arange(num)
if self.point_style[k] == 's':
self.axes.scatter(
np.real(self.points[k][1][indperm]),
- np.real(self.points[k][0][indperm]),
np.real(self.points[k][2][indperm]),
s=self.point_size[np.mod(k, len(self.point_size))],
alpha=1,
edgecolor='none',
zdir='z',
color=self.point_color[np.mod(k, len(self.point_color))],
marker=self.point_marker[np.mod(k,
len(self.point_marker))])
elif self.point_style[k] == 'm':
pnt_colors = np.array(self.point_color *
int(np.ceil(num /
float(len(self.point_color)))))
pnt_colors = pnt_colors[0:num]
pnt_colors = list(pnt_colors[indperm])
marker = self.point_marker[np.mod(k, len(self.point_marker))]
pnt_size = self.point_size[np.mod(k, len(self.point_size))]
self.axes.scatter(np.real(self.points[k][1][indperm]),
-np.real(self.points[k][0][indperm]),
np.real(self.points[k][2][indperm]),
s=pnt_size, alpha=1, edgecolor='none',
zdir='z', color=pnt_colors,
marker=marker)
elif self.point_style[k] == 'l':
color = self.point_color[np.mod(k, len(self.point_color))]
self.axes.plot(np.real(self.points[k][1]),
-np.real(self.points[k][0]),
np.real(self.points[k][2]),
alpha=0.75, zdir='z',
color=color)
def plot_annotations(self):
"""Plot annotations"""
# -X and Y data are switched for plotting purposes
for annotation in self.annotations:
vec = annotation['position']
opts = {'fontsize': self.font_size,
'color': self.font_color,
'horizontalalignment': 'center',
'verticalalignment': 'center'}
opts.update(annotation['opts'])
self.axes.text(vec[1], -vec[0], vec[2],
annotation['text'], **opts)
def show(self, title=''):
"""
Display Bloch sphere and corresponding data sets.
"""
self.render(title=title)
if self.fig:
plt.show(self.fig)
def save(self, name=None, output='png', dirc=None):
"""Saves Bloch sphere to file of type ``format`` in directory ``dirc``.
Args:
name (str):
Name of saved image. Must include path and format as well.
i.e. '/Users/Paul/Desktop/bloch.png'
                This overrides the 'output' and 'dirc' arguments.
output (str):
Format of output image.
dirc (str):
Directory for output images. Defaults to current working directory.
"""
self.render()
if dirc:
if not os.path.isdir(os.getcwd() + "/" + str(dirc)):
os.makedirs(os.getcwd() + "/" + str(dirc))
if name is None:
if dirc:
self.fig.savefig(os.getcwd() + "/" + str(dirc) + '/bloch_' +
str(self.savenum) + '.' + output)
else:
self.fig.savefig(os.getcwd() + '/bloch_' + str(self.savenum) +
'.' + output)
else:
self.fig.savefig(name)
self.savenum += 1
if self.fig:
if get_backend() in ['module://ipykernel.pylab.backend_inline',
'nbAgg']:
plt.close(self.fig)
def _hide_tick_lines_and_labels(axis):
"""
Set visible property of ticklines and ticklabels of an axis to False
"""
for item in axis.get_ticklines() + axis.get_ticklabels():
item.set_visible(False)
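# Minimal usage sketch of the Bloch class defined above (assumes an interactive
# matplotlib backend is available): draw one unit vector plus a short point
# trajectory, then display the sphere. Names prefixed with demo_ are illustrative.
if __name__ == '__main__':
    demo_sphere = Bloch()
    demo_sphere.add_vectors([0, 1, 0])
    demo_sphere.add_points([[0.0, 0.1, 0.2],
                            [0.0, 0.3, 0.4],
                            [1.0, 0.9, 0.8]])
    demo_sphere.show()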
|
apache-2.0
|
jakirkham/bokeh
|
tests/test_examples.py
|
3
|
7377
|
from __future__ import absolute_import, print_function
import os
from os.path import basename
import time
import pytest
import subprocess
import signal
from os.path import dirname, exists, split
import six
from bokeh.server.callbacks import NextTickCallback, PeriodicCallback, TimeoutCallback
from bokeh._testing.util.images import image_diff
from bokeh._testing.util.screenshot import get_screenshot
from bokeh.client import push_session
from bokeh.command.util import build_single_handler_application
from bokeh.util.terminal import trace, info, fail, ok, red, warn, white
pytest_plugins = (
"bokeh._testing.plugins.bokeh_server",
"bokeh._testing.plugins.examples_report",
)
@pytest.mark.examples
def test_js_examples(js_example, example, report):
if example.is_skip:
pytest.skip("skipping %s" % example.relpath)
if example.no_js:
if not pytest.config.option.no_js:
warn("skipping bokehjs for %s" % example.relpath)
else:
_assert_snapshot(example, "file://%s" % example.path, 'js')
if example.no_diff:
warn("skipping image diff for %s" % example.relpath)
else:
_get_pdiff(example)
@pytest.mark.examples
def test_file_examples(file_example, example, report):
if example.is_skip:
pytest.skip("skipping %s" % example.relpath)
(status, duration, out, err) = _run_example(example)
info("Example run in %s" % white("%.3fs" % duration))
for line in out.split("\n"):
if len(line) == 0 or line.startswith("Wrote "):
continue
info(line, label="PY")
for line in err.split("\n"):
if len(line) == 0:
continue
warn(line, label="PY")
assert status != "timeout", "%s timed out" % example.relpath
assert status == 0, "%s failed to run (exit code %s)" % (example.relpath, status)
if example.no_js:
if not pytest.config.option.no_js:
warn("skipping bokehjs for %s" % example.relpath)
else:
_assert_snapshot(example, "file://%s.html" % example.path_no_ext, 'file')
if example.no_diff:
warn("skipping image diff for %s" % example.relpath)
else:
_get_pdiff(example)
@pytest.mark.examples
def test_server_examples(server_example, example, report, bokeh_server):
if example.is_skip:
pytest.skip("skipping %s" % example.relpath)
# mitigate some weird interaction isolated to simple ids, py2.7,
# "push_session" server usage, and TravisCI
if six.PY2: os.environ['BOKEH_SIMPLE_IDS'] = 'no'
app = build_single_handler_application(example.path)
doc = app.create_document()
if six.PY2: del os.environ['BOKEH_SIMPLE_IDS']
# remove all next-tick, periodic, and timeout callbacks
for session_callback in doc.session_callbacks:
if isinstance(session_callback, NextTickCallback):
doc.remove_next_tick_callback(session_callback)
elif isinstance(session_callback, PeriodicCallback):
doc.remove_periodic_callback(session_callback)
elif isinstance(session_callback, TimeoutCallback):
doc.remove_timeout_callback(session_callback)
else:
raise RuntimeError('Unhandled callback type', type(session_callback))
session_id = basename(example.path)
push_session(doc, session_id=session_id)
if example.no_js:
if not pytest.config.option.no_js:
warn("skipping bokehjs for %s" % example.relpath)
else:
_assert_snapshot(example, "http://localhost:5006/?bokeh-session-id=%s" % session_id, 'server')
if example.no_diff:
warn("skipping image diff for %s" % example.relpath)
else:
_get_pdiff(example)
def _get_pdiff(example):
img_path, ref_path, diff_path = example.img_path, example.ref_path, example.diff_path
trace("generated image: " + img_path)
ref = example.fetch_ref()
if not ref:
warn("reference image %s doesn't exist" % example.ref_url)
else:
ref_dir = dirname(ref_path)
if not exists(ref_dir):
os.makedirs(ref_dir)
with open(ref_path, "wb") as f:
f.write(ref)
trace("saved reference: " + ref_path)
example.pixels = image_diff(diff_path, img_path, ref_path)
if example.pixels != 0:
comment = white("%.02f%%" % example.pixels) + " of pixels"
warn("generated and reference images differ: %s" % comment)
else:
ok("generated and reference images match")
def _get_path_parts(path):
parts = []
while True:
newpath, tail = split(path)
parts.append(tail)
path = newpath
if tail == 'examples':
break
parts.reverse()
return parts
def _print_webengine_output(result):
errors = result['errors']
messages = result['messages']
for message in messages:
level = message['level']
text = message['text']
url = message['url']
line = message['line']
col = message['col']
msg = "{%s} %s:%s:%s %s" % (level, url, line, col, text)
info(msg, label="JS")
for error in errors:
for line in error['text'].split("\n"):
fail(line, label="JS")
def _assert_snapshot(example, url, example_type):
screenshot_path = example.img_path
width = 1000
height = 2000 if example_type == 'notebook' else 1000
local_wait = 100
global_wait = 15000
start = time.time()
result = get_screenshot(url, screenshot_path, local_wait, global_wait, width, height)
end = time.time()
info("Example rendered in %s" % white("%.3fs" % (end - start)))
success = result['success']
timeout = result['timeout']
errors = result['errors']
no_errors = len(errors) == 0
if timeout:
warn("%s %s" % (red("TIMEOUT:"), "bokehjs did not finish in %s ms" % global_wait))
if pytest.config.option.verbose:
_print_webengine_output(result)
assert success, "%s failed to load" % example.relpath
assert no_errors, "%s failed with %d errors" % (example.relpath, len(errors))
def _run_example(example):
code = """\
__file__ = filename = '%s'
import random
random.seed(1)
import numpy as np
np.random.seed(1)
import warnings
warnings.filterwarnings("ignore", ".*", UserWarning, "matplotlib.font_manager")
with open(filename, 'rb') as example:
exec(compile(example.read(), filename, 'exec'))
""" % example.path
cmd = ["python", "-c", code]
cwd = dirname(example.path)
env = os.environ.copy()
env['BOKEH_IGNORE_FILENAME'] = 'true'
env['BOKEH_RESOURCES'] = 'relative'
env['BOKEH_MINIFIED'] = 'false'
env['BOKEH_BROWSER'] = 'none'
class Timeout(Exception):
pass
def alarm_handler(sig, frame):
raise Timeout
signal.signal(signal.SIGALRM, alarm_handler)
signal.alarm(20 if not example.is_slow else 60)
start = time.time()
try:
proc = subprocess.Popen(cmd, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
status = proc.wait()
except Timeout:
proc.kill()
status = 'timeout'
finally:
signal.alarm(0)
end = time.time()
out = proc.stdout.read().decode("utf-8")
err = proc.stderr.read().decode("utf-8")
return (status, end - start, out, err)
|
bsd-3-clause
|
hlin117/scikit-learn
|
examples/tree/plot_iris.py
|
86
|
1965
|
"""
================================================================
Plot the decision surface of a decision tree on the iris dataset
================================================================
Plot the decision surface of a decision tree trained on pairs
of features of the iris dataset.
See :ref:`decision tree <tree>` for more information on the estimator.
For each pair of iris features, the decision tree learns decision
boundaries made of combinations of simple thresholding rules inferred from
the training samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
plot_colors = "bry"
plot_step = 0.02
# Load data
iris = load_iris()
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
[1, 2], [1, 3], [2, 3]]):
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Train
clf = DecisionTreeClassifier().fit(X, y)
# Plot the decision boundary
plt.subplot(2, 3, pairidx + 1)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.xlabel(iris.feature_names[pair[0]])
plt.ylabel(iris.feature_names[pair[1]])
plt.axis("tight")
# Plot the training points
for i, color in zip(range(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.axis("tight")
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend()
plt.show()
|
bsd-3-clause
|
NSLS-II-SRX/ipython_ophyd
|
profile_srx_user/ipython_qtconsole_config.py
|
13
|
24674
|
# Configuration file for ipython-qtconsole.
c = get_config()
#------------------------------------------------------------------------------
# IPythonQtConsoleApp configuration
#------------------------------------------------------------------------------
# IPythonQtConsoleApp will inherit config from: BaseIPythonApplication,
# Application, IPythonConsoleApp, ConnectionFileMixin
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.IPythonQtConsoleApp.ip = u''
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.IPythonQtConsoleApp.verbose_crash = False
# Start the console window maximized.
# c.IPythonQtConsoleApp.maximize = False
# The date format used by logging formatters for %(asctime)s
# c.IPythonQtConsoleApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# set the shell (ROUTER) port [default: random]
# c.IPythonQtConsoleApp.shell_port = 0
# The SSH server to use to connect to the kernel.
# c.IPythonQtConsoleApp.sshserver = ''
# set the stdin (DEALER) port [default: random]
# c.IPythonQtConsoleApp.stdin_port = 0
# Set the log level by value or name.
# c.IPythonQtConsoleApp.log_level = 30
# Path to the ssh key to use for logging in to the ssh server.
# c.IPythonQtConsoleApp.sshkey = ''
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.IPythonQtConsoleApp.extra_config_file = u''
# Whether to create profile dir if it doesn't exist
# c.IPythonQtConsoleApp.auto_create = False
# path to a custom CSS stylesheet
# c.IPythonQtConsoleApp.stylesheet = ''
# set the heartbeat port [default: random]
# c.IPythonQtConsoleApp.hb_port = 0
# Whether to overwrite existing config files when copying
# c.IPythonQtConsoleApp.overwrite = False
# set the iopub (PUB) port [default: random]
# c.IPythonQtConsoleApp.iopub_port = 0
# The IPython profile to use.
# c.IPythonQtConsoleApp.profile = u'default'
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security-
# dir of the current profile, but can be specified by absolute path.
# c.IPythonQtConsoleApp.connection_file = ''
# Set to display confirmation dialog on exit. You can always use 'exit' or
# 'quit', to force a direct exit without any confirmation.
# c.IPythonQtConsoleApp.confirm_exit = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This options can also be specified through the environment
# variable IPYTHONDIR.
# c.IPythonQtConsoleApp.ipython_dir = u''
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.IPythonQtConsoleApp.copy_config_files = False
# Connect to an already running kernel
# c.IPythonQtConsoleApp.existing = ''
# Use a plaintext widget instead of rich text (plain can't print/save).
# c.IPythonQtConsoleApp.plain = False
# Start the console window with the menu bar hidden.
# c.IPythonQtConsoleApp.hide_menubar = False
# The Logging format template
# c.IPythonQtConsoleApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
#
# c.IPythonQtConsoleApp.transport = 'tcp'
#------------------------------------------------------------------------------
# IPythonWidget configuration
#------------------------------------------------------------------------------
# A FrontendWidget for an IPython kernel.
# IPythonWidget will inherit config from: FrontendWidget, HistoryConsoleWidget,
# ConsoleWidget
# The type of completer to use. Valid values are:
#
# 'plain' : Show the available completion as a text list
# Below the editing area.
# 'droplist': Show the completion in a drop down list navigable
# by the arrow keys, and from which you can select
# completion by pressing Return.
# 'ncurses' : Show the completion as a text list which is navigable by
# `tab` and arrow keys.
# c.IPythonWidget.gui_completion = 'ncurses'
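# For example (a sketch), to switch to the drop-down completer described above:
# c.IPythonWidget.gui_completion = 'droplist'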
# Whether to process ANSI escape codes.
# c.IPythonWidget.ansi_codes = True
# A CSS stylesheet. The stylesheet can contain classes for:
# 1. Qt: QPlainTextEdit, QFrame, QWidget, etc
# 2. Pygments: .c, .k, .o, etc. (see PygmentsHighlighter)
# 3. IPython: .error, .in-prompt, .out-prompt, etc
# c.IPythonWidget.style_sheet = u''
# The height of the console at start time in number of characters (will double
# with `vsplit` paging)
# c.IPythonWidget.height = 25
#
# c.IPythonWidget.out_prompt = 'Out[<span class="out-prompt-number">%i</span>]: '
#
# c.IPythonWidget.input_sep = '\n'
# Whether to draw information calltips on open-parentheses.
# c.IPythonWidget.enable_calltips = True
#
# c.IPythonWidget.in_prompt = 'In [<span class="in-prompt-number">%i</span>]: '
# The width of the console at start time in number of characters (will double
# with `hsplit` paging)
# c.IPythonWidget.width = 81
# A command for invoking a system text editor. If the string contains a
# {filename} format specifier, it will be used. Otherwise, the filename will be
# appended to the end the command.
# c.IPythonWidget.editor = ''
# If not empty, use this Pygments style for syntax highlighting. Otherwise, the
# style sheet is queried for Pygments style information.
# c.IPythonWidget.syntax_style = u''
# The font family to use for the console. On OSX this defaults to Monaco, on
# Windows the default is Consolas with fallback of Courier, and on other
# platforms the default is Monospace.
# c.IPythonWidget.font_family = u''
# The pygments lexer class to use.
# c.IPythonWidget.lexer_class = <IPython.utils.traitlets.Undefined object at 0x1866810>
#
# c.IPythonWidget.output_sep2 = ''
# Whether to automatically execute on syntactically complete input.
#
# If False, Shift-Enter is required to submit each execution. Disabling this is
# mainly useful for non-Python kernels, where the completion check would be
# wrong.
# c.IPythonWidget.execute_on_complete_input = True
# The maximum number of lines of text before truncation. Specifying a non-
# positive number disables text truncation (not recommended).
# c.IPythonWidget.buffer_size = 500
#
# c.IPythonWidget.history_lock = False
#
# c.IPythonWidget.banner = u''
# The type of underlying text widget to use. Valid values are 'plain', which
# specifies a QPlainTextEdit, and 'rich', which specifies a QTextEdit.
# c.IPythonWidget.kind = 'plain'
# Whether to ask for user confirmation when restarting kernel
# c.IPythonWidget.confirm_restart = True
# The font size. If unconfigured, Qt will be entrusted with the size of the
# font.
# c.IPythonWidget.font_size = 0
# The editor command to use when a specific line number is requested. The string
# should contain two format specifiers: {line} and {filename}. If this parameter
# is not specified, the line number option to the %edit magic will be ignored.
# c.IPythonWidget.editor_line = u''
# Whether to clear the console when the kernel is restarted
# c.IPythonWidget.clear_on_kernel_restart = True
# The type of paging to use. Valid values are:
#
# 'inside'
# The widget pages like a traditional terminal.
# 'hsplit'
# When paging is requested, the widget is split horizontally. The top
# pane contains the console, and the bottom pane contains the paged text.
# 'vsplit'
# Similar to 'hsplit', except that a vertical splitter is used.
# 'custom'
# No action is taken by the widget beyond emitting a
# 'custom_page_requested(str)' signal.
# 'none'
# The text is written directly to the console.
# c.IPythonWidget.paging = 'inside'
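# For example (a sketch), to show paged text in a separate pane below the console:
# c.IPythonWidget.paging = 'hsplit'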
#
# c.IPythonWidget.output_sep = ''
#------------------------------------------------------------------------------
# IPKernelApp configuration
#------------------------------------------------------------------------------
# IPython: an enhanced interactive Python shell.
# IPKernelApp will inherit config from: BaseIPythonApplication, Application,
# InteractiveShellApp
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.IPKernelApp.exec_PYTHONSTARTUP = True
# The importstring for the DisplayHook factory
# c.IPKernelApp.displayhook_class = 'IPython.kernel.zmq.displayhook.ZMQDisplayHook'
# Set the IP or interface on which the kernel will listen.
# c.IPKernelApp.ip = u''
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.IPKernelApp.pylab = None
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.IPKernelApp.verbose_crash = False
# The Kernel subclass to be used.
#
# This should allow easy re-use of the IPKernelApp entry point to configure and
# launch kernels other than IPython's own.
# c.IPKernelApp.kernel_class = 'IPython.kernel.zmq.ipkernel.Kernel'
# Run the module as a script.
# c.IPKernelApp.module_to_run = ''
# The date format used by logging formatters for %(asctime)s
# c.IPKernelApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# set the shell (ROUTER) port [default: random]
# c.IPKernelApp.shell_port = 0
# set the control (ROUTER) port [default: random]
# c.IPKernelApp.control_port = 0
# Whether to overwrite existing config files when copying
# c.IPKernelApp.overwrite = False
# Execute the given command string.
# c.IPKernelApp.code_to_run = ''
# set the stdin (ROUTER) port [default: random]
# c.IPKernelApp.stdin_port = 0
# Set the log level by value or name.
# c.IPKernelApp.log_level = 30
# lines of code to run at IPython startup.
# c.IPKernelApp.exec_lines = []
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.IPKernelApp.extra_config_file = u''
# The importstring for the OutStream factory
# c.IPKernelApp.outstream_class = 'IPython.kernel.zmq.iostream.OutStream'
# Whether to create profile dir if it doesn't exist
# c.IPKernelApp.auto_create = False
# set the heartbeat port [default: random]
# c.IPKernelApp.hb_port = 0
#
# c.IPKernelApp.transport = 'tcp'
# redirect stdout to the null device
# c.IPKernelApp.no_stdout = False
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.IPKernelApp.hide_initial_ns = True
# dotted module name of an IPython extension to load.
# c.IPKernelApp.extra_extension = ''
# A file to be run
# c.IPKernelApp.file_to_run = ''
# The IPython profile to use.
# c.IPKernelApp.profile = u'default'
#
# c.IPKernelApp.parent_appname = u''
# kill this process if its parent dies. On Windows, the argument specifies the
# HANDLE of the parent process, otherwise it is simply boolean.
# c.IPKernelApp.parent_handle = 0
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.IPKernelApp.connection_file = ''
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.IPKernelApp.pylab_import_all = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This options can also be specified through the environment
# variable IPYTHONDIR.
# c.IPKernelApp.ipython_dir = u''
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.IPKernelApp.matplotlib = None
# ONLY USED ON WINDOWS Interrupt this process when the parent is signaled.
# c.IPKernelApp.interrupt = 0
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.IPKernelApp.copy_config_files = False
# List of files to run at IPython startup.
# c.IPKernelApp.exec_files = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'none',
# 'osx', 'pyglet', 'qt', 'qt4', 'tk', 'wx').
# c.IPKernelApp.gui = None
# A list of dotted module names of IPython extensions to load.
# c.IPKernelApp.extensions = []
# redirect stderr to the null device
# c.IPKernelApp.no_stderr = False
# The Logging format template
# c.IPKernelApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# set the iopub (PUB) port [default: random]
# c.IPKernelApp.iopub_port = 0
#------------------------------------------------------------------------------
# ZMQInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of InteractiveShell for ZMQ.
# ZMQInteractiveShell will inherit config from: InteractiveShell
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQInteractiveShell.color_info = True
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.ZMQInteractiveShell.ast_transformers = []
#
# c.ZMQInteractiveShell.history_length = 10000
# Don't call post-execute functions that have failed in the past.
# c.ZMQInteractiveShell.disable_failing_post_execute = False
# Show rewritten input, e.g. for autocall.
# c.ZMQInteractiveShell.show_rewritten_input = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.ZMQInteractiveShell.colors = 'Linux'
#
# c.ZMQInteractiveShell.separate_in = '\n'
# Deprecated, use PromptManager.in2_template
# c.ZMQInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.ZMQInteractiveShell.separate_out = ''
# Deprecated, use PromptManager.in_template
# c.ZMQInteractiveShell.prompt_in1 = 'In [\\#]: '
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.ZMQInteractiveShell.deep_reload = False
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.ZMQInteractiveShell.autocall = 0
#
# c.ZMQInteractiveShell.separate_out2 = ''
# Deprecated, use PromptManager.justify
# c.ZMQInteractiveShell.prompts_pad_left = True
#
# c.ZMQInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# Enable magic commands to be called without the leading %.
# c.ZMQInteractiveShell.automagic = True
#
# c.ZMQInteractiveShell.debug = False
#
# c.ZMQInteractiveShell.object_info_string_level = 0
#
# c.ZMQInteractiveShell.ipython_dir = ''
#
# c.ZMQInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file.
# c.ZMQInteractiveShell.logstart = False
# The name of the logfile to use.
# c.ZMQInteractiveShell.logfile = ''
#
# c.ZMQInteractiveShell.wildcards_case_sensitive = True
# Save multi-line entries as one entry in readline history
# c.ZMQInteractiveShell.multiline_history = True
# Start logging to the given file in append mode.
# c.ZMQInteractiveShell.logappend = ''
#
# c.ZMQInteractiveShell.xmode = 'Context'
#
# c.ZMQInteractiveShell.quiet = False
# Deprecated, use PromptManager.out_template
# c.ZMQInteractiveShell.prompt_out = 'Out[\\#]: '
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.ZMQInteractiveShell.cache_size = 1000
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.ZMQInteractiveShell.ast_node_interactivity = 'last_expr'
# Automatically call the pdb debugger after every exception.
# c.ZMQInteractiveShell.pdb = False
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# KernelManager will inherit config from: ConnectionFileMixin
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, IPython does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the IPython command
# line.
# c.KernelManager.kernel_cmd = []
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.KernelManager.ip = u''
#
# c.KernelManager.transport = 'tcp'
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = False
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters
# ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
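# A concrete example of the packer/unpacker settings described above (a sketch),
# switching message serialization from JSON to msgpack:
# c.Session.packer = 'msgpack.packb'
# c.Session.unpacker = 'msgpack.unpackb'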
# Username for the Session. Default is your system username.
# c.Session.username = u'swilkins'
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The UUID identifying this session.
# c.Session.session = u''
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# execution key, for extra authentication.
# c.Session.key = ''
# Debug output in the Session
# c.Session.debug = False
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# path to file containing execution key.
# c.Session.keyfile = ''
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
#------------------------------------------------------------------------------
# InlineBackend configuration
#------------------------------------------------------------------------------
# An object to store configuration of the inline backend.
# The figure format to enable (deprecated use `figure_formats` instead)
# c.InlineBackend.figure_format = u''
# A set of figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
# c.InlineBackend.figure_formats = set(['png'])
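# For example (a sketch), to also emit high-resolution 'retina' figures inline:
# c.InlineBackend.figure_formats = set(['png', 'retina'])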
# Extra kwargs to be passed to fig.canvas.print_figure.
#
# Logical examples include: bbox_inches, quality (for jpeg figures), etc.
# c.InlineBackend.print_figure_kwargs = {'bbox_inches': 'tight'}
# Close all figures at the end of each cell.
#
# When True, ensures that each cell starts with no active figures, but it also
# means that one must keep track of references in order to edit or redraw
# figures in subsequent cells. This mode is ideal for the notebook, where
# residual plots from other cells might be surprising.
#
# When False, one must call figure() to create new figures. This means that
# gcf() and getfigs() can reference figures created in other cells, and the
# active figure can continue to be edited with pylab/pyplot methods that
# reference the current active figure. This mode facilitates iterative editing
# of figures, and behaves most consistently with other matplotlib backends, but
# figure barriers between cells must be explicit.
# c.InlineBackend.close_figures = True
# Subset of matplotlib rcParams that should be different for the inline backend.
# c.InlineBackend.rc = {'font.size': 10, 'figure.figsize': (6.0, 4.0), 'figure.facecolor': (1, 1, 1, 0), 'savefig.dpi': 72, 'figure.subplot.bottom': 0.125, 'figure.edgecolor': (1, 1, 1, 0)}
|
bsd-2-clause
|
ZenDevelopmentSystems/scikit-learn
|
examples/covariance/plot_lw_vs_oas.py
|
248
|
2903
|
"""
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a close formula to compute
the asymptotically optimal shrinkage parameter (minimizing a MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired from Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
np.random.seed(0)
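# Tiny standalone illustration of the two estimators compared below (a sketch;
# it uses its own RandomState so the global seed set above stays untouched).
# Both estimators expose the fitted shrinkage coefficient via `shrinkage_`.
_rng_demo = np.random.RandomState(1)
_X_demo = _rng_demo.normal(size=(20, 5))
print("LW shrinkage: %.3f  OAS shrinkage: %.3f"
      % (LedoitWolf().fit(_X_demo).shrinkage_, OAS().fit(_X_demo).shrinkage_))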
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='r')
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)
# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='r')
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)
plt.show()
|
bsd-3-clause
|
timtroendle/urban-occupants-paper
|
urbanoccupants/tests/test_hipf_with_toy_example.py
|
1
|
9065
|
"""Testing the HIPF algorithm with the toy example from its original paper.
The paper defining the algorithm and the toy example is:
Müller and Axhausen 2011: "Hierarchical IPF: Generating a synthetic population for Switzerland"
"""
import itertools
from collections import namedtuple
import pandas as pd
import numpy as np
from pandas.util.testing import assert_series_equal
import pytest
from urbanoccupants.hipf import fit_hipf
HouseholdType = namedtuple('HouseholdType', ['household_ids', 'a', 'alpha', 'weights'])
@pytest.fixture
def household_types():
household_types = []
household_types.append(HouseholdType(
household_ids=range(1, 23),
a=True,
alpha=[True, False, False],
weights=[1.33, 1.28, 1.18]
))
household_types.append(HouseholdType(
household_ids=range(23, 44),
a=True,
alpha=[True, False],
weights=[1.61, 1.61, 1.50]
))
household_types.append(HouseholdType(
household_ids=range(44, 65),
a=True,
alpha=[False, False, False],
weights=[0.92, 0.75, 0.54]
))
household_types.append(HouseholdType(
household_ids=range(65, 81),
a=False,
alpha=[False, False],
weights=[0.45, 0.38, 0.28]
))
household_types.append(HouseholdType(
household_ids=range(81, 97),
a=False,
alpha=[True, False, False],
weights=[0.62, 0.66, 0.68]
))
household_types.append(HouseholdType(
household_ids=range(97, 109),
a=False,
alpha=[False],
weights=[0.48, 0.38, 0.26]
))
household_types.append(HouseholdType(
household_ids=range(109, 120),
a=True,
alpha=[False, False],
weights=[0.97, 0.75, 0.49]
))
household_types.append(HouseholdType(
household_ids=range(120, 129),
a=True,
alpha=[False],
weights=[1.01, 0.75, 0.45]
))
household_types.append(HouseholdType(
household_ids=range(129, 137),
a=False,
alpha=[True, True, False],
weights=[0.82, 1.00, 1.30]
))
household_types.append(HouseholdType(
household_ids=range(137, 145),
a=True,
alpha=[True, True, False],
weights=[1.73, 1.95, 2.24]
))
household_types.append(HouseholdType(
household_ids=range(145, 152),
a=False,
alpha=[True, False],
weights=[0.75, 0.82, 0.87]
))
household_types.append(HouseholdType(
household_ids=range(152, 159),
a=False,
alpha=[False, False, False],
weights=[0.43, 0.38, 0.31]
))
household_types.append(HouseholdType(
household_ids=range(159, 165),
a=True,
alpha=[True],
weights=[2.35, 2.76, 3.27]
))
household_types.append(HouseholdType(
household_ids=range(165, 171),
a=True,
alpha=[True, True],
weights=[2.25, 2.75, 3.58]
))
household_types.append(HouseholdType(
household_ids=range(171, 174),
a=False,
alpha=[True],
weights=[1.11, 1.41, 1.89]
))
household_types.append(HouseholdType(
household_ids=range(174, 176),
a=True,
alpha=[True, True, True],
weights=[2.14, 2.74, 3.92]
))
household_types.append(HouseholdType(
household_ids=range(176, 177),
a=False,
alpha=[True, True],
weights=[1.06, 1.40, 2.07]
))
return household_types
@pytest.fixture
def reference_sample(household_types):
id_tuples = itertools.chain(*(itertools.product(ht.household_ids, range(1, len(ht.alpha) + 1))
for ht in household_types))
index = pd.MultiIndex.from_tuples(list(id_tuples), names=['household_id', 'person_id'])
ref_sample = pd.DataFrame(index=index, columns=['a', 'alpha'])
for ht in household_types:
ref_sample.ix[ht.household_ids[0]: ht.household_ids[-1], 'a'] = ht.a
for p, alpha in enumerate(ht.alpha):
ref_sample.loc[
(slice(ht.household_ids[0], ht.household_ids[-1]), p + 1),
'alpha'
] = alpha
return ref_sample
@pytest.fixture
def expected_weights(household_types, reference_sample):
expected_weights = pd.DataFrame(
index=reference_sample.groupby(reference_sample.index.get_level_values(0))
.count().index.get_level_values(0),
columns=[0, 5, 10, 'infinity'],
dtype=np.float64
)
expected_weights[0] = 1.
for ht in household_types:
for household_id in ht.household_ids:
expected_weights.ix[household_id, 5] = ht.weights[0]
expected_weights.ix[household_id, 10] = ht.weights[1]
expected_weights.ix[household_id, 'infinity'] = ht.weights[2]
return expected_weights
@pytest.fixture
def controls_households():
return {'a': {True: 145, False: 45}}
@pytest.fixture
def controls_individuals():
return {'alpha': {True: 227, False: 207}}
def assert_weights_equal(expected_weights, weights, precision=1):
weights.name = expected_weights.name # don't want to check the name
assert_series_equal(expected_weights, weights, check_less_precise=precision)
def test_first_iteration(reference_sample, expected_weights,
controls_households, controls_individuals):
weights = fit_hipf(
reference_sample=reference_sample,
controls_individuals=controls_individuals,
controls_households=controls_households,
maxiter=1
)
assert_weights_equal(expected_weights[5], weights) # 5 iterations in paper represent 1 iteration
def test_second_iteration(reference_sample, expected_weights,
controls_households, controls_individuals):
weights = fit_hipf(
reference_sample=reference_sample,
controls_individuals=controls_individuals,
controls_households=controls_households,
maxiter=2
)
assert_weights_equal(expected_weights[10], weights) # 5 iterations in paper represent 1 iteration
def test_convergence(reference_sample, expected_weights,
controls_households, controls_individuals):
weights = fit_hipf(
reference_sample=reference_sample,
controls_individuals=controls_individuals,
controls_households=controls_households,
maxiter=10
)
assert_weights_equal(expected_weights['infinity'], weights)
@pytest.mark.parametrize("tol", [(10), (1), (0.1)]) # assertion below uses tolerance 0.01
def test_residuals_tolerance_criteria_stops_early(reference_sample, expected_weights, tol,
controls_households, controls_individuals):
weights = fit_hipf(
reference_sample=reference_sample,
controls_individuals=controls_individuals,
controls_households=controls_households,
residuals_tol=tol,
weights_tol=1e-16,
maxiter=10
)
with pytest.raises(AssertionError):
assert_weights_equal(expected_weights['infinity'], weights)
@pytest.mark.parametrize("tol", [(0.01), (0.001)]) # assertion below uses tolerance 0.01
def test_residuals_tolerance_criteria_does_not_stop_early(reference_sample, expected_weights,
tol, controls_households,
controls_individuals):
weights = fit_hipf(
reference_sample=reference_sample,
controls_individuals=controls_individuals,
controls_households=controls_households,
residuals_tol=tol,
weights_tol=1e-16,
maxiter=10
)
assert_weights_equal(expected_weights['infinity'], weights)
@pytest.mark.parametrize("tol", [(10), (1), (0.1)]) # assertion below uses tolerance 0.01
def test_weights_tolerance_criteria_stops_early(reference_sample, expected_weights, tol,
controls_households, controls_individuals):
weights = fit_hipf(
reference_sample=reference_sample,
controls_individuals=controls_individuals,
controls_households=controls_households,
residuals_tol=1e-16,
weights_tol=tol,
maxiter=10
)
with pytest.raises(AssertionError):
assert_weights_equal(expected_weights['infinity'], weights)
@pytest.mark.parametrize("tol", [(0.01), (0.001)]) # assertion below uses tolerance 0.01
def test_weights_tolerance_criteria_does_not_stop_early(reference_sample, expected_weights,
tol, controls_households,
controls_individuals):
weights = fit_hipf(
reference_sample=reference_sample,
controls_individuals=controls_individuals,
controls_households=controls_households,
residuals_tol=1e-16,
weights_tol=tol,
maxiter=10
)
assert_weights_equal(expected_weights['infinity'], weights)
|
mit
|
webmasterraj/FogOrNot
|
flask/lib/python2.7/site-packages/pandas/tseries/tests/test_timeseries_legacy.py
|
6
|
8447
|
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta
import sys
import os
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, DatetimeIndex,
Int64Index, to_datetime, bdate_range, NaT)  # NaT is used in test_object_convert_fail below
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.compat import(
range, long, StringIO, lrange, lmap, map, zip, cPickle as pickle, product
)
from pandas import read_pickle
from numpy.random import rand
import pandas.compat as compat
from pandas.core.datetools import BDay
# unfortunately, too much has changed to handle these legacy pickles
# class TestLegacySupport(unittest.TestCase):
class LegacySupport(object):
_multiprocess_can_split_ = True
@classmethod
def setUpClass(cls):
if compat.PY3:
raise nose.SkipTest("not compatible with Python >= 3")
pth, _ = os.path.split(os.path.abspath(__file__))
filepath = os.path.join(pth, 'data', 'frame.pickle')
with open(filepath, 'rb') as f:
cls.frame = pickle.load(f)
filepath = os.path.join(pth, 'data', 'series.pickle')
with open(filepath, 'rb') as f:
cls.series = pickle.load(f)
def test_pass_offset_warn(self):
buf = StringIO()
sys.stderr = buf
DatetimeIndex(start='1/1/2000', periods=10, offset='H')
sys.stderr = sys.__stderr__
def test_unpickle_legacy_frame(self):
dtindex = DatetimeIndex(start='1/3/2005', end='1/14/2005',
freq=BDay(1))
unpickled = self.frame
self.assertEqual(type(unpickled.index), DatetimeIndex)
self.assertEqual(len(unpickled), 10)
self.assertTrue((unpickled.columns == Int64Index(np.arange(5))).all())
self.assertTrue((unpickled.index == dtindex).all())
self.assertEqual(unpickled.index.offset, BDay(1, normalize=True))
def test_unpickle_legacy_series(self):
from pandas.core.datetools import BDay
unpickled = self.series
dtindex = DatetimeIndex(start='1/3/2005', end='1/14/2005',
freq=BDay(1))
self.assertEqual(type(unpickled.index), DatetimeIndex)
self.assertEqual(len(unpickled), 10)
self.assertTrue((unpickled.index == dtindex).all())
self.assertEqual(unpickled.index.offset, BDay(1, normalize=True))
def test_unpickle_legacy_len0_daterange(self):
pth, _ = os.path.split(os.path.abspath(__file__))
filepath = os.path.join(pth, 'data', 'series_daterange0.pickle')
result = pd.read_pickle(filepath)
ex_index = DatetimeIndex([], freq='B')
self.assertTrue(result.index.equals(ex_index))
tm.assert_isinstance(result.index.freq, offsets.BDay)
self.assertEqual(len(result), 0)
def test_arithmetic_interaction(self):
index = self.frame.index
obj_index = index.asobject
dseries = Series(rand(len(index)), index=index)
oseries = Series(dseries.values, index=obj_index)
result = dseries + oseries
expected = dseries * 2
tm.assert_isinstance(result.index, DatetimeIndex)
assert_series_equal(result, expected)
result = dseries + oseries[:5]
expected = dseries + dseries[:5]
tm.assert_isinstance(result.index, DatetimeIndex)
assert_series_equal(result, expected)
def test_join_interaction(self):
index = self.frame.index
obj_index = index.asobject
def _check_join(left, right, how='inner'):
ra, rb, rc = left.join(right, how=how, return_indexers=True)
ea, eb, ec = left.join(DatetimeIndex(right), how=how,
return_indexers=True)
tm.assert_isinstance(ra, DatetimeIndex)
self.assertTrue(ra.equals(ea))
assert_almost_equal(rb, eb)
assert_almost_equal(rc, ec)
_check_join(index[:15], obj_index[5:], how='inner')
_check_join(index[:15], obj_index[5:], how='outer')
_check_join(index[:15], obj_index[5:], how='right')
_check_join(index[:15], obj_index[5:], how='left')
def test_join_nonunique(self):
idx1 = to_datetime(['2012-11-06 16:00:11.477563',
'2012-11-06 16:00:11.477563'])
idx2 = to_datetime(['2012-11-06 15:11:09.006507',
'2012-11-06 15:11:09.006507'])
rs = idx1.join(idx2, how='outer')
self.assertTrue(rs.is_monotonic)
def test_unpickle_daterange(self):
pth, _ = os.path.split(os.path.abspath(__file__))
filepath = os.path.join(pth, 'data', 'daterange_073.pickle')
rng = read_pickle(filepath)
tm.assert_isinstance(rng[0], datetime)
tm.assert_isinstance(rng.offset, offsets.BDay)
self.assertEqual(rng.values.dtype, object)
def test_setops(self):
index = self.frame.index
obj_index = index.asobject
result = index[:5].union(obj_index[5:])
expected = index
tm.assert_isinstance(result, DatetimeIndex)
self.assertTrue(result.equals(expected))
result = index[:10].intersection(obj_index[5:])
expected = index[5:10]
tm.assert_isinstance(result, DatetimeIndex)
self.assertTrue(result.equals(expected))
result = index[:10] - obj_index[5:]
expected = index[:5]
tm.assert_isinstance(result, DatetimeIndex)
self.assertTrue(result.equals(expected))
def test_index_conversion(self):
index = self.frame.index
obj_index = index.asobject
conv = DatetimeIndex(obj_index)
self.assertTrue(conv.equals(index))
self.assertRaises(ValueError, DatetimeIndex, ['a', 'b', 'c', 'd'])
def test_tolist(self):
rng = date_range('1/1/2000', periods=10)
result = rng.tolist()
tm.assert_isinstance(result[0], Timestamp)
def test_object_convert_fail(self):
idx = DatetimeIndex([NaT])
self.assertRaises(ValueError, idx.astype, 'O')
def test_setops_conversion_fail(self):
index = self.frame.index
right = Index(['a', 'b', 'c', 'd'])
result = index.union(right)
expected = Index(np.concatenate([index.asobject, right]))
self.assertTrue(result.equals(expected))
result = index.intersection(right)
expected = Index([])
self.assertTrue(result.equals(expected))
def test_legacy_time_rules(self):
rules = [('WEEKDAY', 'B'),
('EOM', 'BM'),
('W@MON', 'W-MON'), ('W@TUE', 'W-TUE'), ('W@WED', 'W-WED'),
('W@THU', 'W-THU'), ('W@FRI', 'W-FRI'),
('Q@JAN', 'BQ-JAN'), ('Q@FEB', 'BQ-FEB'), ('Q@MAR', 'BQ-MAR'),
('A@JAN', 'BA-JAN'), ('A@FEB', 'BA-FEB'), ('A@MAR', 'BA-MAR'),
('A@APR', 'BA-APR'), ('A@MAY', 'BA-MAY'), ('A@JUN', 'BA-JUN'),
('A@JUL', 'BA-JUL'), ('A@AUG', 'BA-AUG'), ('A@SEP', 'BA-SEP'),
('A@OCT', 'BA-OCT'), ('A@NOV', 'BA-NOV'), ('A@DEC', 'BA-DEC'),
('WOM@1FRI', 'WOM-1FRI'), ('WOM@2FRI', 'WOM-2FRI'),
('WOM@3FRI', 'WOM-3FRI'), ('WOM@4FRI', 'WOM-4FRI')]
start, end = '1/1/2000', '1/1/2010'
for old_freq, new_freq in rules:
old_rng = date_range(start, end, freq=old_freq)
new_rng = date_range(start, end, freq=new_freq)
self.assertTrue(old_rng.equals(new_rng))
# test get_legacy_offset_name
offset = datetools.get_offset(new_freq)
old_name = datetools.get_legacy_offset_name(offset)
self.assertEqual(old_name, old_freq)
def test_ms_vs_MS(self):
left = datetools.get_offset('ms')
right = datetools.get_offset('MS')
self.assertEqual(left, datetools.Milli())
self.assertEqual(right, datetools.MonthBegin())
def test_rule_aliases(self):
rule = datetools.to_offset('10us')
self.assertEqual(rule, datetools.Micro(10))
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
gpl-2.0
|
IshankGulati/scikit-learn
|
examples/neighbors/plot_digits_kde_sampling.py
|
108
|
2026
|
"""
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
# load the data
digits = load_digits()
data = digits.data
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
|
bsd-3-clause
|
public-ink/public-ink
|
server/appengine/lib/matplotlib/tight_bbox.py
|
22
|
2601
|
"""
This module is to support *bbox_inches* option in savefig command.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import warnings
from matplotlib.transforms import Bbox, TransformedBbox, Affine2D
def adjust_bbox(fig, bbox_inches, fixed_dpi=None):
"""
Temporarily adjust the figure so that only the specified area
(bbox_inches) is saved.
It modifies fig.bbox, fig.bbox_inches,
fig.transFigure._boxout, and fig.patch. While the figure size
changes, the scale of the original figure is conserved. A
function which restores the original values is returned.
"""
origBbox = fig.bbox
origBboxInches = fig.bbox_inches
_boxout = fig.transFigure._boxout
asp_list = []
locator_list = []
for ax in fig.axes:
pos = ax.get_position(original=False).frozen()
locator_list.append(ax.get_axes_locator())
asp_list.append(ax.get_aspect())
def _l(a, r, pos=pos):
return pos
ax.set_axes_locator(_l)
ax.set_aspect("auto")
def restore_bbox():
for ax, asp, loc in zip(fig.axes, asp_list, locator_list):
ax.set_aspect(asp)
ax.set_axes_locator(loc)
fig.bbox = origBbox
fig.bbox_inches = origBboxInches
fig.transFigure._boxout = _boxout
fig.transFigure.invalidate()
fig.patch.set_bounds(0, 0, 1, 1)
if fixed_dpi is not None:
tr = Affine2D().scale(fixed_dpi)
dpi_scale = fixed_dpi / fig.dpi
else:
tr = Affine2D().scale(fig.dpi)
dpi_scale = 1.
_bbox = TransformedBbox(bbox_inches, tr)
fig.bbox_inches = Bbox.from_bounds(0, 0,
bbox_inches.width, bbox_inches.height)
x0, y0 = _bbox.x0, _bbox.y0
w1, h1 = fig.bbox.width * dpi_scale, fig.bbox.height * dpi_scale
fig.transFigure._boxout = Bbox.from_bounds(-x0, -y0, w1, h1)
fig.transFigure.invalidate()
fig.bbox = TransformedBbox(fig.bbox_inches, tr)
fig.patch.set_bounds(x0 / w1, y0 / h1,
fig.bbox.width / w1, fig.bbox.height / h1)
return restore_bbox
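# Rough usage sketch, not part of the original module: a backend would typically
# bracket its draw/print call with an adjust/restore pair. The figure `fig`, the
# 3x2 inch bbox and the output path below are purely illustrative assumptions.
#
#     from matplotlib.transforms import Bbox
#     keep = Bbox.from_bounds(0, 0, 3, 2)            # region to keep, in inches
#     restore = adjust_bbox(fig, keep, fixed_dpi=100)
#     try:
#         fig.canvas.print_figure('cropped.png')     # rendered with the adjusted bbox
#     finally:
#         restore()                                  # puts fig.bbox & friends back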
def process_figure_for_rasterizing(fig, bbox_inches_restore, fixed_dpi=None):
"""
This needs to be called when the figure dpi changes during drawing
(e.g., rasterizing). It recovers the bbox and re-adjusts it with
the new dpi.
"""
bbox_inches, restore_bbox = bbox_inches_restore
restore_bbox()
r = adjust_bbox(fig, bbox_inches, fixed_dpi)
return bbox_inches, r
|
gpl-3.0
|
SKIRT/PTS
|
magic/train/collection.py
|
1
|
9368
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.magic.train.collection Contains the Collector class.
# -----------------------------------------------------------------
# Ensure Python 3 functionality
from __future__ import absolute_import, division, print_function
# Import standard modules
import os
import matplotlib.pyplot as plt
from matplotlib.widgets import Button
# Import the relevant PTS classes and modules
from ..core.image import Image
from ..core.source import Source
from ...core.tools import introspection
from ...core.tools import filesystem as fs
from ...core.basics.configurable import Configurable
# -----------------------------------------------------------------
description = {"star": "star", "saturation": "saturated star / diffraction pattern"}
# -----------------------------------------------------------------
class Collector(Configurable):
"""
This class ...
"""
def __init__(self, *args, **kwargs):
"""
The constructor ...
:param kwargs:
:return:
"""
# Call the constructor of the base class
super(Collector, self).__init__(*args, **kwargs)
# The current and previous source
self.previous_source = None
self.current_source = None
# Determine the path to the magic/collection user directory
self.collection_user_path = os.path.join(introspection.pts_user_dir, "magic", "collection")
# Create the user collection directory
fs.create_directory(self.collection_user_path)
self.yes_path = None
self.no_path = None
self.last_path = None
self.current_index_yes = -1
self.current_index_no = -1
# -----------------------------------------------------------------
@classmethod
def from_arguments(cls, arguments):
"""
This function ...
:param arguments:
:return:
"""
# Create a new Collector instance
collector = cls()
# -- Adjust the configuration settings according to the command-line arguments --
collector.config.mode = arguments.mode
# Return the collector
return collector
# -----------------------------------------------------------------
def _run(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# 2. Collect
self.collect()
# -----------------------------------------------------------------
def clear(self):
"""
This function ...
:return:
"""
self.current_source = None
self.yes_path = None
self.no_path = None
self.current_index_yes = -1
self.current_index_no = -1
# -----------------------------------------------------------------
def setup(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Call the setup function of the base class
super(Collector, self).setup(**kwargs)
# Get description
self.description = description[self.config.mode]
# Determine the paths to the star collection and saturation collection directories
mode_path = os.path.join(self.collection_user_path, self.config.mode)
# Create the star collection and saturation collection directories
fs.create_directory(mode_path)
# Determine the paths to the 'yes' and 'no' saturation collection directories
self.yes_path = os.path.join(mode_path, "yes")
self.no_path = os.path.join(mode_path, "no")
# Current index of saturation sources
self.current_index_yes = -1
self.current_index_no = -1
for path in fs.files_in_path(self.yes_path, extension="fits"):
name = os.path.basename(path)
index = int(name.split(".fits")[0])
if index > self.current_index_yes: self.current_index_yes = index
for path in fs.files_in_path(self.no_path, extension="fits"):
name = os.path.basename(path)
index = int(name.split(".fits")[0])
if index > self.current_index_no: self.current_index_no = index
# -----------------------------------------------------------------
def collect(self):
"""
This function ...
:return:
"""
# Get a list of the filepaths for every FITS file in the current working directory
file_paths = fs.files_in_path(os.getcwd(), extension="fits", contains=self.config.mode)
# Keep track of how many files have been processed
self.number_of_files = len(file_paths)
self.processed = 0
# Loop over all FITS files found in the current directory
for file_path in file_paths:
# Get information
name = os.path.basename(file_path).split(".fits")[0]
info, index = name.split("_")
# Open the image, select all frames
image = Image.from_file(file_path, always_call_first_primary=False)
image.frames.select_all()
# Create a source
source = Source.from_image(image)
self.show(source)
# -----------------------------------------------------------------
def show(self, source):
"""
This function ...
:param source:
:return:
"""
# Create a plot for the source
source.plot(title="Is this a " + self.description + "? (" + str(self.processed) + " out of " + str(self.number_of_files) + ")", show=False, scale="log")
# Set current and previous source
self.previous_source = self.current_source
self.current_source = source
# Axes
axyes = plt.axes([0.6, 0.05, 0.1, 0.075])
axno = plt.axes([0.7, 0.05, 0.1, 0.075])
axunsure = plt.axes([0.8, 0.05, 0.1, 0.075])
axback = plt.axes([0.1, 0.05, 0.1, 0.075])
# Buttons
yes_button = Button(axyes, 'Yes')
yes_button.on_clicked(self.save_yes)
no_button = Button(axno, 'No')
no_button.on_clicked(self.save_no)
unsure_button = Button(axunsure, 'Unsure')
unsure_button.on_clicked(self.dont_save)
back_button = Button(axback, 'Back')
back_button.on_clicked(self.go_back)
# Show the plot
plt.show()
# Increment the counter
self.processed += 1
# -----------------------------------------------------------------
def save_yes(self, event):
"""
This function ...
:param event:
:return:
"""
# Increment index
self.current_index_yes += 1
# Determine the path to the new FITS file
path = os.path.join(self.yes_path, str(self.current_index_yes) + ".fits")
# Inform the user and save the source object
self.log.info("Saving the " + self.description + " to " + path)
self.current_source.saveto(path)
self.last_path = path
# Close the currently active plotting window for this source
plt.close()
# -----------------------------------------------------------------
def save_no(self, event):
"""
This function ...
:param event:
:return:
"""
# Increment index
self.current_index_no += 1
# Determine the path to the new FITS file
path = os.path.join(self.no_path, str(self.current_index_no) + ".fits")
# Inform the user and save the source object
self.log.info("Saving the source to " + path)
self.current_source.saveto(path)
self.last_path = path
# Close the currently active plotting window for this source
plt.close()
# -----------------------------------------------------------------
def show_frame(self, event):
"""
This function ...
:param event:
:return:
"""
pass
# -----------------------------------------------------------------
def go_back(self, event):
"""
This function ...
:param event:
:return:
"""
if self.previous_source is not None:
# Inform the user
self.log.info("Going back to the previous source")
plt.close()
# Remove saved file
if self.last_path is not None: fs.remove_file(self.last_path)
self.processed -= 1
self.current_source = None
self.show(self.previous_source)
else: self.log.warning("Cannot go back")
# -----------------------------------------------------------------
def dont_save(self, event):
"""
This function ...
:param event:
:return:
"""
# Inform the user
self.log.info("Ignoring source")
self.last_path = None
# Close the currently active plotting window for this source
plt.close()
# -----------------------------------------------------------------
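# Rough usage sketch, not part of the original module. It assumes, as for other
# PTS Configurable classes, that run() dispatches to setup() and _run(); the Args
# object below is a hypothetical stand-in for parsed command-line arguments.
#
#     class Args(object):
#         mode = "saturation"
#     collector = Collector.from_arguments(Args())
#     collector.run()   # walks the *.fits files in the cwd and shows each source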
|
agpl-3.0
|
zhenwendai/RGP
|
svi_experiments/rgp_experiments.py
|
1
|
42403
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 21 11:51:21 2017
@author: grigoral
"""
from __future__ import print_function
import autoreg
import GPy
import numpy as np
from matplotlib import pyplot as plt
import scipy.io as io
from autoreg.benchmark import tasks
# Define class for normalization
class Normalize(object):
def __init__(self, data, name, norm_name):
self.data_mean = data.mean(axis=0)
self.data_std = data.std(axis=0)
self.normalization_computed = True
setattr(self, name, data)
setattr(self, norm_name, (data-self.data_mean) / self.data_std )
def normalize(self, data, name, norm_name):
if hasattr(self,norm_name):
raise ValueError("This normalization name already exist, choose another one")
setattr(self, name, data )
setattr(self, norm_name, (data-self.data_mean) / self.data_std )
def denormalize(self, data):
return data*self.data_std + self.data_mean
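# Rough usage sketch for Normalize, not part of the original script; the arrays
# below are illustrative only:
#
#     train = np.random.randn(100, 2) * 3.0 + 1.0
#     test = np.random.randn(20, 2) * 3.0 + 1.0
#     norm = Normalize(train, 'train', 'train_norm')   # stores mean/std of train
#     norm.normalize(test, 'test', 'test_norm')        # reuses the training statistics
#     recovered = norm.denormalize(norm.test_norm)     # ~= test again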
def prepare_data(task_name, normalize=False):
task = getattr( tasks, task_name)
task = task()
task.load_data()
print("Data OUT train shape: ", task.data_out_train.shape)
print("Data IN train shape: ", task.data_in_train.shape)
print("Data OUT test shape: ", task.data_out_test.shape)
print("Data IN test shape: ", task.data_in_test.shape)
normalize = True
in_data = Normalize(task.data_in_train,'in_train','in_train_norm' )
out_data = Normalize(task.data_out_train,'out_train','out_train_norm' )
in_data.normalize(task.data_in_test, 'in_test','in_test_norm')
out_data.normalize(task.data_out_test, 'out_test','out_test_norm')
if normalize:
out_train = out_data.out_train_norm #out_data.out_train
in_train = in_data.in_train_norm # in_data.in_train
out_test = out_data.out_test_norm #out_data.out_test
in_test = in_data.in_test_norm #in_data.in_test
else:
out_train = out_data.out_train #out_data.out_train
in_train = in_data.in_train # in_data.in_train
out_test = out_data.out_test #out_data.out_test
in_test = in_data.in_test #in_data.in_test
return out_train, in_train, task
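# Rough usage sketch, not part of the original script: prepare_data can be called
# directly with any of the autoreg benchmark task names listed in the docstrings
# below, e.g.
#
#     out_train, in_train, task = prepare_data('IdentificationExample5', normalize=True)
#     print(out_train.shape, in_train.shape, task.win_in, task.win_out)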
class IE5_experiment_1( object ):
"""
Tested parameters are: initial number of optimization runs, number of hidden dims, number of inducing points.
After the first experiment the conclusion is that 1 hidden dim is the best, but also
the optimization is not very explorative.
Probably there was an error in the experiment setup because I did not change the number of hidden layers,
only the number of hidden dimensions in 1 layer.
Best values: init_runs = 160.0, hidden dim=1., Q=50. (237.44060068)
Iteration 21
"""
def __init__( self, initial_counter, iter_nums):
self.counter = initial_counter
self.iter_nums = iter_nums
def __call__( self, *args, **kwargs ):
#import pdb; pdb.set_trace()
new_args = (self.counter,self.iter_nums,) + tuple( [ int(args[0][0,i]) for i in range(args[0].shape[1]) ] )
ret = self.IE5_experiment_1_code( *new_args, **kwargs)
self.counter += 1
return ret
@staticmethod
def IE5_experiment_1_code( bo_iter_no, p_iter_num, p_init_runs, p_hidden_dims, p_Q):
"""
Hyper parameter search for IE5 data, varying a small number of parameters.
One hidden layer.
"""
# p_iter_num: how many iterations are needed to evaluate one set of hyper parameters
# task names:
# Actuator, Ballbeam, Drive, Gas_furnace, Flutter, Dryer, Tank,
# IdentificationExample1..5
#import pdb; pdb.set_trace()
out_train, in_train, task = prepare_data('IdentificationExample5', normalize=True)
p_task_name = 'IE5_1'
#p_iteration =
train_U = in_train.copy()
train_Y = out_train.copy()
p_init_runs = p_init_runs
p_max_runs = 10000
p_num_layers = 1
p_hidden_dims = [p_hidden_dims,]
p_inference_method = None
p_back_cstr = False
p_MLP_Dims = None
p_Q = p_Q
p_win_in = task.win_in
p_win_out = task.win_out
p_init = 'Y'
p_x_init_var = 0.05
result = list()
for i_no in range(0, p_iter_num): # iterations take into account model randomness e.g. initialization of inducing points
res = rgp_experiment_raw(p_task_name, bo_iter_no*10+i_no, train_U, train_Y, p_init_runs, p_max_runs, p_num_layers, p_hidden_dims,
p_inference_method, p_back_cstr, p_MLP_Dims, p_Q, p_win_in, p_win_out, p_init, p_x_init_var)
result.append(res[0])
#import pdb; pdb.set_trace()
return np.array(((np.min(result),),))
class IE5_experiment_2( object ):
"""
Tested parameters are: initial number of optimization runs, number of inducing points.
Conclusions after the experiment: the output file contains only variables named var_1, var_2, etc.,
but Xavier said that the order is preserved.
The optimal values are: init_runs = 110, Q (ind. num) = 200. (run 3), 240.44817869
But the results are still the same from run to run.
Total running time was 40 hours on a GPU machine.
Maybe we can reduce the number of intrinsic iterations per evaluation.
Now the idea is to use manually designed initial values to run a more proper experiment. (Experiment 3)
"""
def __init__( self, initial_counter, iter_nums):
self.counter = initial_counter
self.iter_nums = iter_nums
def __call__( self, *args, **kwargs ):
#import pdb; pdb.set_trace()
new_args = (self.counter,self.iter_nums,) + tuple( [ int(args[0][0,i]) for i in range(args[0].shape[1]) ] )
ret = self.IE5_experiment_2_code( *new_args, **kwargs)
self.counter += 1
return ret
@staticmethod
def IE5_experiment_2_code( bo_iter_no, p_iter_num, p_init_runs, p_Q):
"""
Hyper parameter search for IE5 data, varying a small number of parameters.
One hidden layer.
"""
# p_iter_num: how many iterations are needed to evaluate one set of hyper parameters
# task names:
# Actuator, Ballbeam, Drive, Gas_furnace, Flutter, Dryer, Tank,
# IdentificationExample1..5
#import pdb; pdb.set_trace()
out_train, in_train, task = prepare_data('IdentificationExample5', normalize=True)
p_task_name = 'IE5_2'
#p_iteration =
train_U = in_train.copy()
train_Y = out_train.copy()
p_init_runs = p_init_runs
p_max_runs = 10000
p_num_layers = 1
p_hidden_dims = [1,]
p_inference_method = None
p_back_cstr = False
p_MLP_Dims = None
p_Q = p_Q
p_win_in = task.win_in
p_win_out = task.win_out
p_init = 'Y'
p_x_init_var = 0.05
result = list()
for i_no in range(0, p_iter_num): # iterations take into account model randomness e.g. initialization of inducing points
res = rgp_experiment_raw(p_task_name, bo_iter_no*10+i_no, train_U, train_Y, p_init_runs, p_max_runs, p_num_layers, p_hidden_dims,
p_inference_method, p_back_cstr, p_MLP_Dims, p_Q, p_win_in, p_win_out, p_init, p_x_init_var)
result.append(res[0])
#import pdb; pdb.set_trace()
return np.array(((np.min(result),),))
class IE5_experiment_3( object ):
"""
Tested parameters are: number of layers, number of inducing points.
Also, I do initial evaluations manually.
Best values: 1 layer, 40 inducing points, (run 7) 242.67636311
This value is larger than in the other experiments. Maybe because there were only 2 internal runs
in every function evaluation.
"""
def __init__( self, initial_counter, iter_nums):
self.counter = initial_counter
self.iter_nums = iter_nums
def __call__( self, *args, **kwargs ):
#import pdb; pdb.set_trace()
new_args = (self.counter,self.iter_nums,) + tuple( [ int(args[0][0,i]) for i in range(args[0].shape[1]) ] )
ret = self.IE5_experiment_3_code( *new_args, **kwargs)
self.counter += 1
return ret
@staticmethod
def IE5_experiment_3_code( bo_iter_no, p_iter_num, p_layer_num, p_Q):
"""
Hyper parameter search for IE5 data, varying a small number of parameters.
One hidden layer.
"""
# p_iter_num: how many iterations are needed to evaluate one set of hyper parameters
# task names:
# Actuator, Ballbeam, Drive, Gas_furnace, Flutter, Dryer, Tank,
# IdentificationExample1..5
#import pdb; pdb.set_trace()
out_train, in_train, task = prepare_data('IdentificationExample5', normalize=True)
p_task_name = 'IE5_2'
#p_iteration =
train_U = in_train.copy()
train_Y = out_train.copy()
p_init_runs = 130
p_max_runs = 10000
p_num_layers = p_layer_num
p_hidden_dims = [1,1,]
p_inference_method = None
p_back_cstr = False
p_MLP_Dims = None
p_Q = p_Q
p_win_in = task.win_in
p_win_out = task.win_out
p_init = 'Y'
p_x_init_var = 0.05
result = list()
for i_no in range(0, p_iter_num): # iterations take into account model randomness e.g. initialization of inducing points
res = rgp_experiment_raw(p_task_name, bo_iter_no*10+i_no, train_U, train_Y, p_init_runs, p_max_runs, p_num_layers, p_hidden_dims,
p_inference_method, p_back_cstr, p_MLP_Dims, p_Q, p_win_in, p_win_out, p_init, p_x_init_var)
result.append(res[0])
#import pdb; pdb.set_trace()
return np.array(((np.min(result),),))
class IE5_experiment_4( object ):
"""
SVI inference
Tested parameters are: number of initial runs, number of inducing points.
First SVI experiment.
"""
def __init__( self, initial_counter, iter_nums):
self.counter = initial_counter
self.iter_nums = iter_nums
def __call__( self, *args, **kwargs ):
#import pdb; pdb.set_trace()
new_args = (self.counter,self.iter_nums,) + tuple( [ int(args[0][0,i]) for i in range(args[0].shape[1]) ] )
ret = self.IE5_experiment_4_code( *new_args, **kwargs)
self.counter += 1
return ret
@staticmethod
def IE5_experiment_4_code( bo_iter_no, p_iter_num, p_init_runs, p_Q):
"""
Hyper parameter search for IE5 data, varying a small number of parameters.
One hidden layer.
"""
# p_iter_num: how many iterations are needed to evaluate one set of hyper parameters
# task names:
# Actuator, Ballbeam, Drive, Gas_furnace, Flutter, Dryer, Tank,
# IdentificationExample1..5
#import pdb; pdb.set_trace()
out_train, in_train, task = prepare_data('IdentificationExample5', normalize=True)
p_task_name = 'IE5_4'
#p_iteration =
train_U = in_train.copy()
train_Y = out_train.copy()
p_init_runs = p_init_runs
p_max_runs = 12000
p_num_layers = 1
p_hidden_dims = [1,1,]
p_inference_method = 'svi'
p_back_cstr = False
p_MLP_Dims = None
p_Q = p_Q
p_win_in = task.win_in
p_win_out = task.win_out
p_init = 'Y'
p_x_init_var = 0.05
result = list()
for i_no in range(0, p_iter_num): # iterations take into account model randomness e.g. initialization of inducing points
res = rgp_experiment_raw(p_task_name, bo_iter_no*10+i_no, train_U, train_Y, p_init_runs, p_max_runs, p_num_layers, p_hidden_dims,
p_inference_method, p_back_cstr, p_MLP_Dims, p_Q, p_win_in, p_win_out, p_init, p_x_init_var)
result.append(res[0])
#import pdb; pdb.set_trace()
return np.array(((np.min(result),),))
class IE5_experiment_5( object ):
"""
Back constraints + SVI inference
Tested parameters are: number of initial runs, number of inducing points.
"""
def __init__( self, initial_counter, iter_nums):
self.counter = initial_counter
self.iter_nums = iter_nums
def __call__( self, *args, **kwargs ):
#import pdb; pdb.set_trace()
new_args = (self.counter,self.iter_nums,) + tuple( [ int(args[0][0,i]) for i in range(args[0].shape[1]) ] )
ret = self.IE5_experiment_5_code( *new_args, **kwargs)
self.counter += 1
return ret
@staticmethod
def IE5_experiment_5_code( bo_iter_no, p_iter_num, p_init_runs, p_Q):
"""
Hyper parameter search for IE5 data, varying a small number of parameters.
One hidden layer.
"""
# p_iter_num: how many iterations are needed to evaluate one set of hyper parameters
# task names:
# Actuator, Ballbeam, Drive, Gas_furnace, Flutter, Dryer, Tank,
# IdentificationExample1..5
#import pdb; pdb.set_trace()
out_train, in_train, task = prepare_data('IdentificationExample5', normalize=True)
p_task_name = 'IE5_5'
#p_iteration =
train_U = in_train.copy()
train_Y = out_train.copy()
p_init_runs = p_init_runs
p_max_runs = 12000
p_num_layers = 1
p_hidden_dims = [1,1,]
p_inference_method = 'svi'
p_back_cstr = True
p_rnn_type='gru'
p_rnn_hidden_dims=[20,]
p_Q = p_Q
p_win_in = task.win_in
p_win_out = task.win_out
result = list()
for i_no in range(0, p_iter_num): # iterations take into account model randomness e.g. initialization of inducing points
res = rgp_experiment_bcstr_raw(p_task_name, bo_iter_no*10+i_no, train_U, train_Y, p_init_runs, p_max_runs, p_num_layers, p_hidden_dims,
p_inference_method, p_back_cstr, p_rnn_type, p_rnn_hidden_dims, p_Q, p_win_in, p_win_out)
result.append(res[0])
#import pdb; pdb.set_trace()
return np.array(((np.min(result),),))
class IE5_experiment_6( object ):
"""
Same as experiment 5, only the model has changed: now it includes the RGP inputs at
the encoder inputs.
Back constraints + SVI inference
Tested parameters are: number of initial runs, number of inducing points.
"""
def __init__( self, initial_counter, iter_nums):
self.counter = initial_counter
self.iter_nums = iter_nums
def __call__( self, *args, **kwargs ):
#import pdb; pdb.set_trace()
new_args = (self.counter,self.iter_nums,) + tuple( [ int(args[0][0,i]) for i in range(args[0].shape[1]) ] )
ret = self.IE5_experiment_6_code( *new_args, **kwargs)
self.counter += 1
return ret
@staticmethod
def IE5_experiment_6_code( bo_iter_no, p_iter_num, p_init_runs, p_Q):
"""
Hyper parameter search for IE5 data, varying a small number of parameters.
One hidden layer.
"""
# p_iter_num: how many iterations are needed to evaluate one set of hyper parameters
# task names:
# Actuator, Ballbeam, Drive, Gas_furnace, Flutter, Dryer, Tank,
# IdentificationExample1..5
#import pdb; pdb.set_trace()
out_train, in_train, task = prepare_data('IdentificationExample5', normalize=True)
p_task_name = 'IE5_6'
#p_iteration =
train_U = in_train.copy()
train_Y = out_train.copy()
p_init_runs = p_init_runs
p_max_runs = 15000
p_num_layers = 1
p_hidden_dims = [1,1,]
p_inference_method = 'svi'
p_back_cstr = True
p_rnn_type='gru'
p_rnn_hidden_dims=[20,]
p_Q = p_Q
p_win_in = task.win_in
p_win_out = task.win_out
result = list()
for i_no in range(0, p_iter_num): # iterations take into account model randomness e.g. initialization of inducing points
res = rgp_experiment_bcstr_raw(p_task_name, bo_iter_no*10+i_no, train_U, train_Y, p_init_runs, p_max_runs, p_num_layers, p_hidden_dims,
p_inference_method, p_back_cstr, p_rnn_type, p_rnn_hidden_dims, p_Q, p_win_in, p_win_out)
result.append(res[0])
#import pdb; pdb.set_trace()
return np.array(((np.min(result),),))
def rgp_experiment_bcstr_raw(p_task_name, p_iteration, train_U, train_Y, p_init_runs, p_max_runs, p_num_layers, p_hidden_dims,
p_inference_method, p_back_cstr, p_rnn_type, p_rnn_hidden_dims, p_Q, p_win_in, p_win_out):
"""
Experiment file for NON MINIBATCH inference.
So, DeepAutoreg_rnn is run here.
Inputs:
-------------------------------
p_task_name: string
Experiment name, used only in file name
p_iteration: int or string
Iteration of the experiment, used only in file name
p_init_runs: int
Number of initial runs when likelihood variances and covariance magnitudes are fixed
p_max_runs: int
Maximum runs of general optimization
p_num_layers: int [1,2]
Number of RGP layers
p_hidden_dims: list[ length is the number of hidden layers]
Dimensions of hidden layers
p_inference_method: string
If 'svi' then SVI inference is used.
p_back_cstr: bool
Use back constraints or not.
p_rnn_hidden_dims: int
Hidden dimension of neural network.
p_Q: int
Number of inducing points
p_win_in, p_win_out: int
Input window and hidden layer window.
"""
win_in = p_win_in # 20
win_out = p_win_out # 20
inference_method = p_inference_method if p_inference_method == 'svi' else None
#import pdb; pdb.set_trace()
if p_num_layers == 1:
# 1 layer:
wins = [0, win_out] # 0-th is output layer
nDims = [train_Y.shape[1], p_hidden_dims[0]]
kernels=[GPy.kern.RBF(win_out,ARD=True,inv_l=True),
GPy.kern.RBF(win_in + win_out,ARD=True,inv_l=True)]
elif p_num_layers == 2:
# 2 layers:
wins = [0, win_out, win_out]
nDims = [train_Y.shape[1], p_hidden_dims[0], p_hidden_dims[1]]
kernels=[GPy.kern.RBF(win_out,ARD=True,inv_l=True),
GPy.kern.RBF(win_out+win_out,ARD=True,inv_l=True),
GPy.kern.RBF(win_out+win_in,ARD=True,inv_l=True)]
else:
raise NotImplementedError()
print("Input window: ", win_in)
print("Output window: ", win_out)
p_Q = 120 #!!!!! TODO:
m = autoreg.DeepAutoreg_rnn(wins, train_Y, U=train_U, U_win=win_in,
num_inducing=p_Q, back_cstr=p_back_cstr, nDims=nDims,
rnn_type=p_rnn_type,
rnn_hidden_dims=p_rnn_hidden_dims,
minibatch_inference=False,
inference_method=inference_method, # Inference method
kernels=kernels)
# pattern for model name: #task_name, inf_meth=?, wins=layers, Q = ?, backcstr=?,MLP_dims=?, nDims=
model_file_name = '%s_%s--inf_meth=%s--backcstr=%s--wins=%s_%s--Q=%i--nDims=%s' % (p_task_name, str(p_iteration),
'reg' if inference_method is None else inference_method,
str(p_back_cstr) if p_back_cstr==False else str(p_back_cstr) + '_' + p_rnn_type + str(p_rnn_hidden_dims[0]),
str(win_in), str(wins), p_Q, str(nDims))
print('Model file name: ', model_file_name)
print(m)
#import pdb; pdb.set_trace()
#Initialization
# Here layer numbers are different than in initialization. 0-th layer is the top one
for i in range(m.nLayers):
m.layers[i].kern.inv_l[:] = np.mean( 1./((m.layers[i].X.mean.values.max(0)-m.layers[i].X.mean.values.min(0))/np.sqrt(2.)) )
m.layers[i].likelihood.variance[:] = 0.01*train_Y.var()
m.layers[i].kern.variance.fix(warning=False)
m.layers[i].likelihood.fix(warning=False)
print(m)
#init_runs = 50 if out_train.shape[0]<1000 else 100
print("Init runs: ", p_init_runs)
m.optimize('bfgs',messages=1,max_iters=p_init_runs)
for i in range(m.nLayers):
m.layers[i].kern.variance.constrain_positive(warning=False)
m.layers[i].likelihood.constrain_positive(warning=False)
m.optimize('bfgs',messages=1,max_iters=p_max_runs)
io.savemat(model_file_name, {'params': m.param_array[:]} )
print(m)
return -float(m._log_marginal_likelihood), m
def rgp_experiment_raw(p_task_name, p_iteration, train_U, train_Y, p_init_runs, p_max_runs, p_num_layers, p_hidden_dims,
p_inference_method, p_back_cstr, p_MLP_Dims, p_Q, p_win_in, p_win_out, p_init, p_x_init_var):
"""
Experiment file for NON MINIBATCH inference.
So, DeepAutoreg_new is run here.
Inputs:
-------------------------------
p_task_name: string
Experiment name, used only in file name
p_iteration: int or string
Iteration of the experiment, used only in file name
p_init_runs: int
Number of initial runs when likelihood variances and covariance magnitudes are fixed
p_max_runs: int
Maximum runs of general optimization
p_num_layers: int [1,2]
Number of RGP layers
p_hidden_dims: list[ length is the number of hidden layers]
Dimensions of hidden layers
p_inference_method: string
If 'svi' then SVI inference is used.
p_back_cstr: bool
Use back constraints or not.
p_MLP_Dims: list[length is the number of MLP hidden layers, ignoring input and output layers]
Values are the number of neurons at each layer.
p_Q: int
Number of inducing points
p_win_in, p_win_out: int
Input window and hidden layer window.
p_init: string 'Y', 'rand', 'zero'
Initialization of RGP hidden layers
p_x_init_var: float
Initial variance for X, usually 0.05 for data close to normalized data.
"""
win_in = p_win_in # 20
win_out = p_win_out # 20
inference_method = p_inference_method if p_inference_method == 'svi' else None
#import pdb; pdb.set_trace()
if p_num_layers == 1:
# 1 layer:
wins = [0, win_out] # 0-th is output layer
nDims = [train_Y.shape[1], p_hidden_dims[0]]
kernels=[GPy.kern.RBF(win_out,ARD=True,inv_l=True),
GPy.kern.RBF(win_in + win_out,ARD=True,inv_l=True)]
elif p_num_layers == 2:
# 2 layers:
wins = [0, win_out, win_out]
nDims = [train_Y.shape[1], p_hidden_dims[0], p_hidden_dims[1]]
kernels=[GPy.kern.RBF(win_out,ARD=True,inv_l=True),
GPy.kern.RBF(win_out+win_out,ARD=True,inv_l=True),
GPy.kern.RBF(win_out+win_in,ARD=True,inv_l=True)]
else:
raise NotImplementedError()
print("Input window: ", win_in)
print("Output window: ", win_out)
m = autoreg.DeepAutoreg_new(wins, train_Y, U=train_U, U_win=win_in,
num_inducing=p_Q, back_cstr=p_back_cstr, MLP_dims=p_MLP_Dims, nDims=nDims,
init=p_init, # how to initialize hidden states means
X_variance=p_x_init_var, #0.05, # how to initialize hidden states variances
inference_method=inference_method, # Inference method
kernels=kernels)
# pattern for model name: #task_name, inf_meth=?, wins=layers, Q = ?, backcstr=?,MLP_dims=?, nDims=
model_file_name = '%s_%s--inf_meth=%s--backcstr=%s--wins=%s_%s--Q=%i--nDims=%s--init=%s--x_init=%s' % (p_task_name, str(p_iteration),
'reg' if inference_method is None else inference_method,
str(p_back_cstr) if p_back_cstr==False else str(p_back_cstr) + '_' + str(p_MLP_Dims),
str(win_in), str(wins), p_Q, str(nDims), p_init, str(p_x_init_var))
print('Model file name: ', model_file_name)
print(m)
#import pdb; pdb.set_trace()
#Initialization
# Here layer numbers are different than in initialization. 0-th layer is the top one
for i in range(m.nLayers):
m.layers[i].kern.inv_l[:] = np.mean( 1./((m.layers[i].X.mean.values.max(0)-m.layers[i].X.mean.values.min(0))/np.sqrt(2.)) )
m.layers[i].likelihood.variance[:] = 0.01*train_Y.var()
m.layers[i].kern.variance.fix(warning=False)
m.layers[i].likelihood.fix(warning=False)
print(m)
#init_runs = 50 if out_train.shape[0]<1000 else 100
print("Init runs: ", p_init_runs)
m.optimize('bfgs',messages=1,max_iters=p_init_runs)
for i in range(m.nLayers):
m.layers[i].kern.variance.constrain_positive(warning=False)
m.layers[i].likelihood.constrain_positive(warning=False)
m.optimize('bfgs',messages=1,max_iters=p_max_runs)
io.savemat(model_file_name, {'params': m.param_array[:]} )
print(m)
return -float(m._log_marginal_likelihood), m
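# Rough sketch of a single direct call to rgp_experiment_raw, bypassing the
# Bayesian-optimization wrappers below; the hyperparameter values are example
# assumptions, not tuned settings:
#
#     out_train, in_train, task = prepare_data('IdentificationExample5', normalize=True)
#     neg_lml, model = rgp_experiment_raw(
#         'IE5_manual', 0, in_train.copy(), out_train.copy(),
#         p_init_runs=100, p_max_runs=5000, p_num_layers=1, p_hidden_dims=[1],
#         p_inference_method=None, p_back_cstr=False, p_MLP_Dims=None, p_Q=50,
#         p_win_in=task.win_in, p_win_out=task.win_out, p_init='Y', p_x_init_var=0.05)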
def bo_run_1():
"""
Run the Bayesian optimization experiment 1.
Tested parameters are: initial number of optimization runs, number of hidden dims, number of inducing points.
After the first experiment the conclusion is that 1 hidden dim is the best, but also
the optimization is not very explorative.
Probably there was an error in the experiment setup because I did not change the number of hidden layers,
only the number of hidden dimensions in 1 layer.
Best values: init_runs = 160.0, hidden dim=1., Q=50. (237.44060068)
Iteration 21
"""
import GPyOpt
import pickle
exper = IE5_experiment_1(0,4)
domain =[{'name': 'init_runs', 'type': 'discrete', 'domain': np.arange(10,201,10) },
{'name': 'hidden_dims', 'type': 'discrete', 'domain': (1,2,3,4)},
{'name': 'Q', 'type': 'discrete', 'domain': np.arange(10,201,10) } ]
Bopt = GPyOpt.methods.BayesianOptimization(f=exper, # function to optimize
domain=domain, # box-constraints of the problem
model_type = 'GP_MCMC',
initial_design_numdata = 2,# number data initial design
acquisition_type='EI_MCMC', # Expected Improvement
exact_feval = False)
#import pdb; pdb.set_trace()
# --- Stop conditions
max_time = None
max_iter = 20
tolerance = 1e-4 # distance between two consecutive observations
# Run the optimization
report_file = 'report_IE5_experiment_1(0,1)'
evaluations_file = 'eval_IE5_experiment_1(0,1)'
models_file = 'model_IE5_experiment_1(0,1)'
Bopt.run_optimization(max_iter = max_iter, max_time = max_time, eps = tolerance, verbosity=True,
report_file = report_file, evaluations_file= evaluations_file, models_file=models_file)
#acquisition_type ='LCB', # LCB acquisition
#acquisition_weight = 0.1)
ff = open('IE5_experiment_1(0,4)','w')
pickle.dump(Bopt,ff)
ff.close()
def bo_run_2():
"""
Run the Bayesian optimization experiment 2.
Tested parameters are: initial number of optimization runs, number of inducing points.
Conclusions after the experiment: the output file contains only variables named var_1, var_2, etc.,
but Xavier said that the order is preserved.
The optimal values are: init_runs = 110, Q (ind. num) = 200. (run 3), 240.44817869
But the results are still the same from run to run.
Total running time was 40 hours on a GPU machine.
Maybe we can reduce the number of intrinsic iterations per evaluation.
Now the idea is to use manually designed initial values to run a more proper experiment. (Experiment 3)
"""
import GPyOpt
import pickle
exper = IE5_experiment_2(0,3)
domain =[{'name': 'init_runs', 'type': 'discrete', 'domain': np.arange(50,201,10) },
#{'name': 'hidden_dims', 'type': 'discrete', 'domain': (1,2,3,4)},
{'name': 'Q', 'type': 'discrete', 'domain': np.arange(40,201,10) } ]
Bopt = GPyOpt.methods.BayesianOptimization(f=exper, # function to optimize
domain=domain, # box-constraints of the problem
model_type = 'GP_MCMC',
initial_design_numdata = 3,# number data initial design
acquisition_type='EI_MCMC', # Expected Improvement
exact_feval = False)
#import pdb; pdb.set_trace()
# --- Stop conditions
max_time = None
max_iter = 7
tolerance = 1e-4 # distance between two consecutive observations
# Run the optimization
report_file = 'report_IE5_experiment_2(0,3)'
evaluations_file = 'eval_IE5_experiment_2(0,3)'
models_file = 'model_IE5_experiment_2(0,3)'
Bopt.run_optimization(max_iter = max_iter, max_time = max_time, eps = tolerance, verbosity=True,
report_file = report_file, evaluations_file= evaluations_file, models_file=models_file, acquisition_par=3) # acquisition_par is
# used to make it more explorative. It seems it did not help.
#acquisition_type ='LCB', # LCB acquisition
#acquisition_weight = 0.1)
ff = open('IE5_experiment_2(0,3)','w')
pickle.dump(Bopt,ff)
ff.close()
def bo_run_3():
"""
Run the Bayesian optimization experiment 3.
Tested parameters are: number of layers, number of inducing points.
Also, I do initial evaluations manually.
Best values: 1 layer, 40 inducing points, (run 7) 242.67636311
This value is larger than in the other experiments. Maybe because there were only 2 internal runs
in every function evaluation.
"""
import GPyOpt
import pickle
exper = IE5_experiment_3(0,2)
domain =[ #{ 'name': 'init_runs', 'type': 'discrete', 'domain': np.arange(50,201,10) },
#{'name': 'hidden_dims', 'type': 'discrete', 'domain': (1,2,3,4)},
{ 'name': 'layer_num', 'type': 'discrete', 'domain': (1,2) },
{'name': 'Q', 'type': 'discrete', 'domain': np.arange(40,201,10) } ]
#out = exper( np.array( (( 2.0,100.0),) ) ) # input_shape: (array([[ 2., 120.]]),) ### outputshape: array([[ 413.67619157]])
input1 = np.array( (( 1.0,50.0),) ); out1 = exper( input1 )
input2 = np.array( (( 2.0,50.0),) ); out2 = exper( input2 )
input3 = np.array( (( 1.0,100.0),) ); out3 = exper( input3 )
input4 = np.array( (( 2.0,100.0),) ); out4 = exper( input4 )
input5 = np.array( (( 1.0,200.0),) ); out5 = exper( input5 )
input6 = np.array( (( 2.0,200.0),) ); out6 = exper( input6 )
# init_input = np.vstack( (input1,input2,) )
# init_out = np.vstack( (out1,out2,) )
init_input = np.vstack( (input1,input2,input3,input4,input5,input6) )
init_out = np.vstack( (out1,out2,out3,out4,out5,out6) )
#import pdb; pdb.set_trace(); #return
#exper()
#import pdb; pdb.set_trace()
Bopt = GPyOpt.methods.BayesianOptimization(f=exper, # function to optimize
domain=domain, # box-constraints of the problem
model_type = 'GP_MCMC',
X=init_input,
Y=init_out,
#initial_design_numdata = 3,# number data initial design
acquisition_type='EI_MCMC', # Expected Improvement
exact_feval = False)
#import pdb; pdb.set_trace(); #return
# --- Stop conditions
max_time = None
max_iter = 10
tolerance = 1e-4 # distance between two consecutive observations
# Run the optimization
report_file = 'report_IE5_experiment_3(0,2)'
evaluations_file = 'eval_IE5_experiment_3(0,2)'
models_file = 'model_IE5_experiment_3(0,2)'
Bopt.run_optimization(max_iter = max_iter, max_time = max_time, eps = tolerance, verbosity=True,
report_file = report_file, evaluations_file= evaluations_file, models_file=models_file, acquisition_par=3) # acquisition_par is
# used to make it more explorative. It seems it did not help.
#acquisition_type ='LCB', # LCB acquisition
#acquisition_weight = 0.1)
ff = open('IE5_experiment_3(0,2)','w')
pickle.dump(Bopt,ff)
ff.close()
def bo_run_4():
"""
Run the Bayesian optimization experiment 4.
SVI inference
Tested parameters are: number of initial runs, number of inducing points.
First SVI experiment.
"""
import GPyOpt
import pickle
exper = IE5_experiment_4(0,3)
domain =[{'name': 'init_runs', 'type': 'discrete', 'domain': np.arange(50,501,50) },
#{'name': 'hidden_dims', 'type': 'discrete', 'domain': (1,2,3,4)},
{'name': 'Q', 'type': 'discrete', 'domain': np.arange(40,201,10) } ]
Bopt = GPyOpt.methods.BayesianOptimization(f=exper, # function to optimize
domain=domain, # box-constraints of the problem
model_type = 'GP_MCMC',
initial_design_numdata = 5,# number data initial design
acquisition_type='EI_MCMC', # Expected Improvement
exact_feval = False)
#import pdb; pdb.set_trace()
# --- Stop conditions
max_time = None
max_iter = 2
tolerance = 1e-4 # distance between two consecutive observations
# Run the optimization
report_file = 'report_IE5_experiment_4(0,3)'
evaluations_file = 'eval_IE5_experiment_4(0,3)'
models_file = 'model_IE5_experiment_4(0,3)'
Bopt.run_optimization(max_iter = max_iter, max_time = max_time, eps = tolerance, verbosity=True,
report_file = report_file, evaluations_file= evaluations_file, models_file=models_file, acquisition_par=3) # acquisition_par is
# used to make it more explorative. It seems it did not help.
#acquisition_type ='LCB', # LCB acquisition
#acquisition_weight = 0.1)
ff = open('IE5_experiment_4(0,3)','w')
pickle.dump(Bopt,ff)
ff.close()
def bo_run_5():
"""
Run the Bayesian optimization experiment 5.
Back constraints + SVI inference
Tested parameters are: number of initial runs, number of inducing points.
The optimal value: 340.816199019 350.0(init_runs) 120.0(ind points), iteration 9 (8 in file names)
"""
import GPyOpt
import pickle
exper = IE5_experiment_5(0,3)
domain =[{'name': 'init_runs', 'type': 'discrete', 'domain': np.arange(50,501,50) },
#{'name': 'hidden_dims', 'type': 'discrete', 'domain': (1,2,3,4)},
{'name': 'Q', 'type': 'discrete', 'domain': np.arange(40,201,10) } ]
Bopt = GPyOpt.methods.BayesianOptimization(f=exper, # function to optimize
domain=domain, # box-constraints of the problem
model_type = 'GP_MCMC',
initial_design_numdata = 5,# number data initial design
acquisition_type='EI_MCMC', # Expected Improvement
exact_feval = False)
#import pdb; pdb.set_trace()
# --- Stop conditions
max_time = None
max_iter = 5
tolerance = 1e-4 # distance between two consecutive observations
# Run the optimization
report_file = 'report_IE5_experiment_5(0,3)'
evaluations_file = 'eval_IE5_experiment_5(0,3)'
models_file = 'model_IE5_experiment_5(0,3)'
Bopt.run_optimization(max_iter = max_iter, max_time = max_time, eps = tolerance, verbosity=True,
report_file = report_file, evaluations_file= evaluations_file, models_file=models_file, acquisition_par=3) # acquisition_par is
# used to make it more explorative. It seems it did not help.
#acquisition_type ='LCB', # LCB acquisition
#acquisition_weight = 0.1)
ff = open('IE5_experiment_5(0,3)','w')
pickle.dump(Bopt,ff)
ff.close()
def bo_run_6():
"""
Run the Bayesian optimization experiment 6.
Same as experiment 5, only the model has changed: now it includes the RGP inputs at
the encoder inputs.
Back constraints + SVI inference
Tested parameters are: number of initial runs, number of inducing points.
The optimal values: 361.667338238 300.0(init_runs) 80.0(ind points), inter. 4 (3 in file name)
"""
import GPyOpt
import pickle
exper = IE5_experiment_6(0,2)
domain =[{'name': 'init_runs', 'type': 'discrete', 'domain': np.arange(50,501,50) },
#{'name': 'hidden_dims', 'type': 'discrete', 'domain': (1,2,3,4)},
{'name': 'Q', 'type': 'discrete', 'domain': np.arange(40,201,10) } ]
Bopt = GPyOpt.methods.BayesianOptimization(f=exper, # function to optimize
domain=domain, # box-constraints of the problem
model_type = 'GP_MCMC',
initial_design_numdata = 5,# number data initial design
acquisition_type='EI_MCMC', # Expected Improvement
exact_feval = False)
#import pdb; pdb.set_trace()
# --- Stop conditions
max_time = None
max_iter = 2
tolerance = 1e-4 # distance between two consecutive observations
# Run the optimization
report_file = 'report_IE5_experiment_6(0,2)'
evaluations_file = 'eval_IE5_experiment_6(0,2)'
models_file = 'model_IE5_experiment_6(0,2)'
Bopt.run_optimization(max_iter = max_iter, max_time = max_time, eps = tolerance, verbosity=True,
report_file = report_file, evaluations_file= evaluations_file, models_file=models_file, acquisition_par=3) # acquisition_par is
# used to make it more explorative. It seems it did not help.
#acquisition_type ='LCB', # LCB acquisition
#acquisition_weight = 0.1)
ff = open('IE5_experiment_6(0,2)','w')
pickle.dump(Bopt,ff)
ff.close()
def bo_run_7():
"""
Run the Bayesian optimization experiment 7.
Same as experiment 6, but with ARD kernel, different tolerance
and different max_iter.
"""
import GPyOpt
import pickle
exper = IE5_experiment_6(0,2)
domain =[{'name': 'init_runs', 'type': 'discrete', 'domain': np.arange(200,501,30) },
#{'name': 'hidden_dims', 'type': 'discrete', 'domain': (1,2,3,4)},
{'name': 'Q', 'type': 'discrete', 'domain': np.arange(60,201,10) } ]
kernel = GPy.kern.RBF(len(domain),ARD=True)
Bopt = GPyOpt.methods.BayesianOptimization(f=exper, # function to optimize
domain=domain, # box-constraints of the problem
model_type = 'GP_MCMC',
kernel=kernel,
initial_design_numdata = 3,# number data initial design
acquisition_type='EI_MCMC', # Expected Improvement
exact_feval = False)
#import pdb; pdb.set_trace()
# --- Stop conditions
max_time = None
max_iter = 7
tolerance = 1e-2 # distance between two consecutive observations
# Run the optimization
report_file = 'report_IE5_experiment_7(0,2)'
evaluations_file = 'eval_IE5_experiment_7(0,2)'
models_file = 'model_IE5_experiment_7(0,2)'
Bopt.run_optimization(max_iter = max_iter, max_time = max_time, eps = tolerance, verbosity=True,
report_file = report_file, evaluations_file= evaluations_file, models_file=models_file, acquisition_par=3) # acquisition_par is
# used to make it more explorative. It seems it did not help.
#acquisition_type ='LCB', # LCB acquisition
#acquisition_weight = 0.1)
ff = open('IE5_experiment_7(0,2)','w')
pickle.dump(Bopt,ff)
ff.close()
if __name__ == '__main__':
bo_run_5()
|
bsd-3-clause
|
adamtiger/tensorflow
|
tensorflow/contrib/labeled_tensor/python/ops/ops.py
|
77
|
46403
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Non-core ops for LabeledTensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import types
import numpy as np
from six import string_types
from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import numerics
from tensorflow.python.ops import random_ops
from tensorflow.python.training import input # pylint: disable=redefined-builtin
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensor, ops.Tensor, core.Axis,
tc.Optional(string_types))
def _gather_1d_on_axis(labeled_tensor, indexer, axis, name=None):
with ops.name_scope(name, 'lt_take', [labeled_tensor]) as scope:
temp_axes = core.Axes([axis] + list(
labeled_tensor.axes.remove(axis.name).values()))
transposed = core.transpose(labeled_tensor, temp_axes.keys())
indexed = core.LabeledTensor(
array_ops.gather(transposed.tensor, indexer), temp_axes)
return core.transpose(indexed, labeled_tensor.axes.keys(), name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(string_types,
tc.Union(slice, collections.Hashable, list)),
tc.Optional(string_types))
def select(labeled_tensor, selection, name=None):
"""Slice out a subset of the tensor.
Args:
labeled_tensor: The input tensor.
selection: A dictionary mapping an axis name to a scalar, slice or list of
values to select. Currently supports two types of selections:
(a) Any number of scalar and/or slice selections.
(b) Exactly one list selection, without any scalars or slices.
name: Optional op name.
Returns:
The selection as a `LabeledTensor`.
Raises:
ValueError: If the tensor doesn't have an axis in the selection or if
that axis lacks labels.
KeyError: If any labels in a selection are not found in the original axis.
NotImplementedError: If you attempt to combine a list selection with
scalar selection or another list selection.
"""
with ops.name_scope(name, 'lt_select', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
slices = {}
indexers = {}
for axis_name, value in selection.items():
if axis_name not in labeled_tensor.axes:
raise ValueError(
'The tensor does not have an axis named %s. Its axes are: %r' %
(axis_name, labeled_tensor.axes.keys()))
axis = labeled_tensor.axes[axis_name]
if axis.labels is None:
raise ValueError(
'The axis named %s does not have labels. The axis is: %r' %
(axis_name, axis))
if isinstance(value, slice):
# TODO(shoyer): consider deprecating using slices in favor of lists
if value.start is None:
start = None
else:
start = axis.index(value.start)
if value.stop is None:
stop = None
else:
# For now, follow the pandas convention of making labeled slices
# inclusive of both bounds.
stop = axis.index(value.stop) + 1
if value.step is not None:
raise NotImplementedError('slicing with a step is not yet supported')
slices[axis_name] = slice(start, stop)
# Needs to be after checking for slices, since slice objects claim to be
# instances of collections.Hashable but hash() on them fails.
elif isinstance(value, collections.Hashable):
slices[axis_name] = axis.index(value)
elif isinstance(value, list):
if indexers:
raise NotImplementedError(
'select does not yet support more than one list selection at '
'the same time')
indexer = [axis.index(v) for v in value]
indexers[axis_name] = ops.convert_to_tensor(indexer, dtype=dtypes.int64)
else:
# If type checking is working properly, this shouldn't be possible.
raise TypeError('cannot handle arbitrary types')
if indexers and slices:
raise NotImplementedError(
'select does not yet support combined scalar and list selection')
# For now, handle array selection separately, because tf.gather_nd does
# not support gradients yet. Later, using gather_nd will let us combine
# these paths.
if indexers:
(axis_name, indexer), = indexers.items()
axis = core.Axis(axis_name, selection[axis_name])
return _gather_1d_on_axis(labeled_tensor, indexer, axis, name=scope)
else:
return core.slice_function(labeled_tensor, slices, name=scope)
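# A minimal usage sketch for `select` (illustrative, not part of the original
# module; the tensors and labels below are made up):
#   lt_in = core.LabeledTensor(array_ops.constant([[1, 2], [3, 4]]),
#                              [('row', ['a', 'b']), ('col', ['x', 'y'])])
#   select(lt_in, {'row': 'a'})              # scalar label: 'row' axis removed
#   select(lt_in, {'col': slice('x', 'y')})  # label slice, inclusive of bounds
#   select(lt_in, {'row': ['a', 'b']})       # list selection keeps the axis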
@tc.returns(core.LabeledTensor)
@tc.accepts(
tc.Collection(core.LabeledTensorLike), string_types,
tc.Optional(string_types))
def concat(labeled_tensors, axis_name, name=None):
"""Concatenate tensors along a dimension.
See tf.concat.
Args:
labeled_tensors: A list of input LabeledTensors.
axis_name: The name of the axis along which to concatenate.
name: Optional op name.
Returns:
The concatenated tensor.
The coordinate labels for the concatenation dimension are also concatenated,
if they are available for every tensor.
Raises:
ValueError: If fewer than one tensor inputs is provided, if the tensors
have incompatible axes, or if `axis_name` isn't the name of an axis.
"""
with ops.name_scope(name, 'lt_concat', labeled_tensors) as scope:
labeled_tensors = [
core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
]
if len(labeled_tensors) < 1:
raise ValueError('concat expects at least 1 tensor, but received %s' %
labeled_tensors)
# All tensors must have these axes.
axes_0 = labeled_tensors[0].axes
axis_names = list(axes_0.keys())
if axis_name not in axis_names:
raise ValueError('%s not in %s' % (axis_name, axis_names))
shared_axes = axes_0.remove(axis_name)
tensors = [labeled_tensors[0].tensor]
concat_axis_list = [axes_0[axis_name]]
for labeled_tensor in labeled_tensors[1:]:
current_shared_axes = labeled_tensor.axes.remove(axis_name)
if current_shared_axes != shared_axes:
# TODO(shoyer): add more specific checks about what went wrong,
# including raising AxisOrderError when appropriate
raise ValueError('Mismatched shared axes: the first tensor '
'had axes %r but this tensor has axes %r.' %
(shared_axes, current_shared_axes))
# Accumulate the axis labels, if they're available.
concat_axis_list.append(labeled_tensor.axes[axis_name])
tensors.append(labeled_tensor.tensor)
concat_axis = core.concat_axes(concat_axis_list)
concat_dimension = axis_names.index(axis_name)
concat_tensor = array_ops.concat(tensors, concat_dimension, name=scope)
values = list(axes_0.values())
concat_axes = (values[:concat_dimension] + [concat_axis] +
values[concat_dimension + 1:])
return core.LabeledTensor(concat_tensor, concat_axes)
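# Sketch (illustrative): concatenating along a labeled axis also concatenates
# that axis's labels.
#   a = core.LabeledTensor(array_ops.constant([1, 2]), [('x', ['a', 'b'])])
#   b = core.LabeledTensor(array_ops.constant([3]), [('x', ['c'])])
#   concat([a, b], 'x')   # result has axis 'x' with labels ['a', 'b', 'c']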
# TODO(shoyer): rename pack/unpack to stack/unstack
@tc.returns(core.LabeledTensor)
@tc.accepts(
tc.Collection(core.LabeledTensorLike),
tc.Union(string_types, core.AxisLike), int, tc.Optional(string_types))
def pack(labeled_tensors, new_axis, axis_position=0, name=None):
"""Pack tensors along a new axis.
See tf.pack.
Args:
labeled_tensors: The input tensors, which must have identical axes.
new_axis: The name of the new axis, or a tuple containing the name
and coordinate labels.
axis_position: Optional integer position at which to insert the new axis.
name: Optional op name.
Returns:
The packed tensors as a single LabeledTensor, with `new_axis` in the given
`axis_position`.
Raises:
ValueError: If fewer than one input tensors is provided, or if the tensors
don't have identical axes.
"""
with ops.name_scope(name, 'lt_pack', labeled_tensors) as scope:
labeled_tensors = [
core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
]
if len(labeled_tensors) < 1:
      raise ValueError('pack expects at least 1 tensor, but received %s' %
labeled_tensors)
axes_0 = labeled_tensors[0].axes
for t in labeled_tensors:
if t.axes != axes_0:
raise ValueError('Non-identical axes. Expected %s but got %s' %
(axes_0, t.axes))
pack_op = array_ops.stack(
[t.tensor for t in labeled_tensors], axis=axis_position, name=scope)
axes = list(axes_0.values())
axes.insert(axis_position, new_axis)
return core.LabeledTensor(pack_op, axes)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(core.LabeledTensorLike,
tc.Optional(string_types), tc.Optional(string_types))
def unpack(labeled_tensor, axis_name=None, name=None):
"""Unpack the tensor.
See tf.unpack.
Args:
labeled_tensor: The input tensor.
axis_name: Optional name of axis to unpack. By default, the first axis is
used.
name: Optional op name.
Returns:
The list of unpacked LabeledTensors.
Raises:
ValueError: If `axis_name` is not an axis on the input.
"""
with ops.name_scope(name, 'lt_unpack', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
axis_names = list(labeled_tensor.axes.keys())
if axis_name is None:
axis_name = axis_names[0]
if axis_name not in axis_names:
raise ValueError('%s not in %s' % (axis_name, axis_names))
axis = axis_names.index(axis_name)
unpack_ops = array_ops.unstack(labeled_tensor.tensor, axis=axis, name=scope)
axes = [a for i, a in enumerate(labeled_tensor.axes.values()) if i != axis]
return [core.LabeledTensor(t, axes) for t in unpack_ops]
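# Sketch (illustrative) of pack/unpack round-tripping:
#   a = core.LabeledTensor(array_ops.constant([1, 2]), [('x', ['i', 'j'])])
#   b = core.LabeledTensor(array_ops.constant([3, 4]), [('x', ['i', 'j'])])
#   stacked = pack([a, b], ('run', ['first', 'second']))  # new 'run' axis first
#   parts = unpack(stacked, 'run')            # list of two LabeledTensors again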
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Collection(string_types),
tc.Collection(tc.Union(string_types, core.AxisLike)),
tc.Optional(string_types))
def reshape(labeled_tensor, existing_axes, new_axes, name=None):
"""Reshape specific axes of a LabeledTensor.
Non-indicated axes remain in their original locations.
Args:
labeled_tensor: The input tensor.
existing_axes: List of axis names found on the input tensor. These must
appear sequentially in the list of axis names on the input. In other
words, they must be a valid slice of `list(labeled_tensor.axes.keys())`.
new_axes: List of strings, tuples of (axis_name, axis_value) or Axis objects
providing new axes with which to replace `existing_axes` in the reshaped
result. At most one element of `new_axes` may be a string, indicating an
axis with unknown size.
name: Optional op name.
Returns:
The reshaped LabeledTensor.
Raises:
ValueError: If `existing_axes` are not all axes on the input, or if more
than one of `new_axes` has unknown size.
AxisOrderError: If `existing_axes` are not a slice of axis names on the
input.
"""
with ops.name_scope(name, 'lt_reshape', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
original_axis_names = list(labeled_tensor.axes.keys())
existing_axes = list(existing_axes)
if not set(existing_axes) <= set(original_axis_names):
raise ValueError('existing_axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(existing_axes, original_axis_names))
start = original_axis_names.index(existing_axes[0])
stop = original_axis_names.index(existing_axes[-1]) + 1
if existing_axes != original_axis_names[start:stop]:
# We could support existing_axes that aren't a slice by using transpose,
# but that could lead to unpredictable performance consequences because
# transposes are not free in TensorFlow. If we did transpose
# automatically, the user might never realize that their data is being
      # produced with the wrong order. (The latter will occur with some frequency
      # because of how broadcasting automatically chooses axis order.)
# So for now we've taken the strict approach.
raise core.AxisOrderError(
'existing_axes %r are not a slice of axis names %r on the input '
'labeled tensor. Use `transpose` or `impose_axis_order` to reorder '
'axes on the input explicitly.' %
(existing_axes, original_axis_names))
if sum(isinstance(axis, string_types) for axis in new_axes) > 1:
raise ValueError(
'at most one axis in new_axes can have unknown size. All other '
'axes must have an indicated integer size or labels: %r' % new_axes)
original_values = list(labeled_tensor.axes.values())
axis_size = lambda axis: -1 if axis.size is None else axis.size
shape = [axis_size(axis) for axis in original_values[:start]]
for axis_ref in new_axes:
if isinstance(axis_ref, string_types):
shape.append(-1)
else:
axis = core.as_axis(axis_ref)
shape.append(axis_size(axis))
shape.extend(axis_size(axis) for axis in original_values[stop:])
reshaped_tensor = array_ops.reshape(
labeled_tensor.tensor, shape, name=scope)
axes = original_values[:start] + list(new_axes) + original_values[stop:]
return core.LabeledTensor(reshaped_tensor, axes)
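# Sketch (illustrative): reshape a consecutive block of axes.
#   For lt_in with axes [('batch', 2), ('x', 3), ('y', 4)]:
#   reshape(lt_in, ['x', 'y'], [('pixel', 12)])  # axes [('batch', 2), ('pixel', 12)]
#   reshape(lt_in, ['x', 'y'], ['pixel'])        # size of 'pixel' inferred (-1)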
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, string_types,
tc.Optional(string_types))
def rename_axis(labeled_tensor, existing_name, new_name, name=None):
"""Rename an axis of LabeledTensor.
Args:
labeled_tensor: The input tensor.
existing_name: Name for an existing axis on the input.
new_name: Desired replacement name.
name: Optional op name.
Returns:
LabeledTensor with renamed axis.
Raises:
ValueError: If `existing_name` is not an axis on the input.
"""
with ops.name_scope(name, 'lt_rename_axis', [labeled_tensor]) as scope:
if existing_name not in labeled_tensor.axes:
      raise ValueError('existing_name %r is not contained in the set of axis '
'names %r on the input labeled tensor' %
(existing_name, labeled_tensor.axes.keys()))
new_axis = core.Axis(new_name, labeled_tensor.axes[existing_name].value)
return reshape(labeled_tensor, [existing_name], [new_axis], name=scope)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(string_types, collections.Callable, int, bool,
tc.Collection(core.LabeledTensorLike), bool,
tc.Optional(string_types))
def _batch_helper(default_name,
batch_fn,
batch_size,
enqueue_many,
labeled_tensors,
allow_smaller_final_batch,
name=None):
with ops.name_scope(name, default_name, labeled_tensors) as scope:
labeled_tensors = [
core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
]
batch_ops = batch_fn([t.tensor for t in labeled_tensors], scope)
# TODO(shoyer): Remove this when they sanitize the TF API.
if not isinstance(batch_ops, list):
assert isinstance(batch_ops, ops.Tensor)
batch_ops = [batch_ops]
if allow_smaller_final_batch:
batch_size = None
@tc.returns(core.Axes)
@tc.accepts(core.Axes)
def output_axes(axes):
if enqueue_many:
if 'batch' not in axes or list(axes.keys()).index('batch') != 0:
raise ValueError(
'When enqueue_many is True, input tensors must have an axis '
'called "batch" as their first dimension, '
'but axes were %s' % axes)
culled_axes = axes.remove('batch')
return core.Axes([('batch', batch_size)] + list(culled_axes.values()))
else:
return core.Axes([('batch', batch_size)] + list(axes.values()))
output_labeled_tensors = []
for i, tensor in enumerate(batch_ops):
axes = output_axes(labeled_tensors[i].axes)
output_labeled_tensors.append(core.LabeledTensor(tensor, axes))
return output_labeled_tensors
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
tc.Collection(core.LabeledTensorLike), int, int, int, bool, bool,
tc.Optional(string_types))
def batch(labeled_tensors,
batch_size,
num_threads=1,
capacity=32,
enqueue_many=False,
allow_smaller_final_batch=False,
name=None):
"""Rebatch a tensor.
See tf.batch.
Args:
labeled_tensors: The input tensors.
batch_size: The output batch size.
num_threads: See tf.batch.
capacity: See tf.batch.
enqueue_many: If true, the input tensors must contain a 'batch' axis as
their first axis.
If false, the input tensors must not contain a 'batch' axis.
See tf.batch.
allow_smaller_final_batch: See tf.batch.
name: Optional op name.
Returns:
The rebatched tensors.
If enqueue_many is false, the output tensors will have a new 'batch' axis
as their first axis.
Raises:
ValueError: If enqueue_many is True and the first axis of the tensors
isn't "batch".
"""
def fn(tensors, scope):
return input.batch(
tensors,
batch_size=batch_size,
num_threads=num_threads,
capacity=capacity,
enqueue_many=enqueue_many,
allow_smaller_final_batch=allow_smaller_final_batch,
name=scope)
return _batch_helper('lt_batch', fn, batch_size, enqueue_many,
labeled_tensors, allow_smaller_final_batch, name)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
tc.Collection(core.LabeledTensorLike), int, int, int, bool, int,
tc.Optional(int), bool, tc.Optional(string_types))
def shuffle_batch(labeled_tensors,
batch_size,
num_threads=1,
capacity=32,
enqueue_many=False,
min_after_dequeue=0,
seed=None,
allow_smaller_final_batch=False,
name=None):
"""Rebatch a tensor, with shuffling.
See tf.batch.
Args:
labeled_tensors: The input tensors.
batch_size: The output batch size.
num_threads: See tf.batch.
capacity: See tf.batch.
enqueue_many: If true, the input tensors must contain a 'batch' axis as
their first axis.
If false, the input tensors must not contain a 'batch' axis.
See tf.batch.
min_after_dequeue: Minimum number of elements in the queue after a dequeue,
used to ensure mixing.
seed: Optional random seed.
allow_smaller_final_batch: See tf.batch.
name: Optional op name.
Returns:
The rebatched tensors.
If enqueue_many is false, the output tensors will have a new 'batch' axis
as their first axis.
Raises:
ValueError: If enqueue_many is True and the first axis of the tensors
isn't "batch".
"""
def fn(tensors, scope):
return input.shuffle_batch(
tensors,
batch_size=batch_size,
num_threads=num_threads,
capacity=capacity,
enqueue_many=enqueue_many,
min_after_dequeue=min_after_dequeue,
seed=seed,
allow_smaller_final_batch=allow_smaller_final_batch,
name=scope)
return _batch_helper('lt_shuffle_batch', fn, batch_size, enqueue_many,
labeled_tensors, allow_smaller_final_batch, name)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(string_types, int),
tc.Optional(int), tc.Optional(string_types))
def random_crop(labeled_tensor, shape_map, seed=None, name=None):
"""Randomly crops a tensor to a given size.
See tf.random_crop.
Args:
labeled_tensor: The input tensor.
shape_map: A dictionary mapping axis names to the size of the random crop
for that dimension.
seed: An optional random seed.
name: An optional op name.
Returns:
A tensor of the same rank as `labeled_tensor`, cropped randomly in the
selected dimensions.
Raises:
ValueError: If the shape map contains an axis name not in the input tensor.
"""
with ops.name_scope(name, 'lt_random_crop', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
for axis_name in shape_map:
if axis_name not in labeled_tensor.axes:
raise ValueError('Selection axis %s not in axes %s' %
(axis_name, labeled_tensor.axes))
shape = []
axes = []
for axis in labeled_tensor.axes.values():
if axis.name in shape_map:
size = shape_map[axis.name]
shape.append(size)
# We lose labels for the axes we crop, leaving just the size.
axes.append((axis.name, size))
else:
shape.append(len(axis))
axes.append(axis)
crop_op = random_ops.random_crop(
labeled_tensor.tensor, shape, seed=seed, name=scope)
return core.LabeledTensor(crop_op, axes)
# TODO(shoyer): Allow the user to select the axis over which to map.
@tc.returns(core.LabeledTensor)
@tc.accepts(collections.Callable, core.LabeledTensorLike,
tc.Optional(string_types))
def map_fn(fn, labeled_tensor, name=None):
"""Map on the list of tensors unpacked from labeled_tensor.
See tf.map_fn.
Args:
fn: The function to apply to each unpacked LabeledTensor.
It should have type LabeledTensor -> LabeledTensor.
labeled_tensor: The input tensor.
name: Optional op name.
Returns:
A tensor that packs the results of applying fn to the list of tensors
unpacked from labeled_tensor.
"""
with ops.name_scope(name, 'lt_map_fn', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
unpack_lts = unpack(labeled_tensor)
# TODO(ericmc): Fix this upstream.
if labeled_tensor.dtype == dtypes.string:
# We must construct the full graph here, because functional_ops.map_fn
# doesn't work for string-valued tensors.
# Constructing the full graph may be slow.
map_lts = [fn(t) for t in unpack_lts]
return pack(map_lts, list(labeled_tensor.axes.values())[0], name=scope)
else:
# Figure out what the axis labels should be, but use tf.map_fn to
# construct the graph because it's efficient.
# It may be slow to construct the full graph, so we infer the labels from
# the first element.
# TODO(ericmc): This builds a subgraph which then gets thrown away.
# Find a more elegant solution.
first_map_lt = fn(unpack_lts[0])
final_axes = list(labeled_tensor.axes.values())[:1] + list(
first_map_lt.axes.values())
@tc.returns(ops.Tensor)
@tc.accepts(ops.Tensor)
def tf_fn(tensor):
original_axes = list(labeled_tensor.axes.values())[1:]
tensor_lt = core.LabeledTensor(tensor, original_axes)
return fn(tensor_lt).tensor
map_op = functional_ops.map_fn(tf_fn, labeled_tensor.tensor)
map_lt = core.LabeledTensor(map_op, final_axes)
return core.identity(map_lt, name=scope)
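# Sketch (illustrative): map a per-example function over the leading axis.
#   For lt_in with axes [('batch', 3), ('x', 5)]:
#   doubled = map_fn(lambda t: core.LabeledTensor(t.tensor * 2, t.axes), lt_in)
#   # `doubled` keeps the axes [('batch', 3), ('x', 5)].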
@tc.returns(core.LabeledTensor)
@tc.accepts(collections.Callable, core.LabeledTensorLike,
core.LabeledTensorLike, tc.Optional(string_types))
def foldl(fn, labeled_tensor, initial_value, name=None):
"""Left fold on the list of tensors unpacked from labeled_tensor.
See tf.foldl.
Args:
fn: The function to apply to each unpacked LabeledTensor.
It should have type (LabeledTensor, LabeledTensor) -> LabeledTensor.
Its arguments are (accumulated_value, next_value).
labeled_tensor: The input tensor.
initial_value: The initial value of the accumulator.
name: Optional op name.
Returns:
The accumulated value.
"""
with ops.name_scope(name, 'lt_foldl',
[labeled_tensor, initial_value]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
initial_value = core.convert_to_labeled_tensor(initial_value)
@tc.returns(ops.Tensor)
@tc.accepts(ops.Tensor, ops.Tensor)
def tf_fn(accumulator, next_element):
accumulator_lt = core.LabeledTensor(accumulator, initial_value.axes)
next_element_lt = core.LabeledTensor(
next_element, list(labeled_tensor.axes.values())[1:])
return fn(accumulator_lt, next_element_lt).tensor
foldl_op = functional_ops.foldl(
tf_fn, labeled_tensor.tensor, initializer=initial_value.tensor)
foldl_lt = core.LabeledTensor(foldl_op, initial_value.axes)
return core.identity(foldl_lt, name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(tc.Collection(string_types)), tc.Optional(string_types))
def squeeze(labeled_tensor, axis_names=None, name=None):
"""Remove size-1 dimensions.
See tf.squeeze.
Args:
labeled_tensor: The input tensor.
axis_names: The names of the dimensions to remove, or None to remove
all size-1 dimensions.
name: Optional op name.
Returns:
A tensor with the specified dimensions removed.
Raises:
ValueError: If the named axes are not in the tensor, or if they are
not size-1.
"""
with ops.name_scope(name, 'lt_squeeze', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if axis_names is None:
axis_names = [a.name for a in labeled_tensor.axes.values() if len(a) == 1]
for axis_name in axis_names:
if axis_name not in labeled_tensor.axes:
raise ValueError('axis %s is not in tensor axes %s' %
(axis_name, labeled_tensor.axes))
elif len(labeled_tensor.axes[axis_name]) != 1:
raise ValueError(
'cannot squeeze axis with size greater than 1: (%s, %s)' %
(axis_name, labeled_tensor.axes[axis_name]))
squeeze_dimensions = []
axes = []
for i, axis in enumerate(labeled_tensor.axes.values()):
if axis.name in axis_names:
squeeze_dimensions.append(i)
else:
axes.append(axis)
if squeeze_dimensions:
squeeze_op = array_ops.squeeze(
labeled_tensor.tensor, squeeze_dimensions, name=scope)
else:
squeeze_op = array_ops.identity(labeled_tensor.tensor, name=scope)
return core.LabeledTensor(squeeze_op, axes)
# pylint: disable=invalid-name
ReduceAxis = tc.Union(string_types,
tc.Tuple(string_types, collections.Hashable))
ReduceAxes = tc.Optional(tc.Union(ReduceAxis, tc.Collection(ReduceAxis)))
# pylint: enable=invalid-name
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
tc.Optional(string_types))
def matmul(a, b, name=None):
"""Matrix multiply two tensors with rank 1 or 2.
If both tensors have rank 2, a matrix-matrix product is performed.
If one tensor has rank 1 and the other has rank 2, then a matrix-vector
product is performed.
If both tensors have rank 1, then a vector dot-product is performed.
(This behavior matches that of `numpy.dot`.)
Both tensors must share exactly one dimension in common, which is the
dimension the operation is summed along. The inputs will be automatically
transposed if necessary as part of the matmul op.
We intend to eventually support `matmul` on higher rank input, and also
eventually support summing over any number shared dimensions (via an `axis`
argument), but neither of these features has been implemented yet.
Args:
a: First LabeledTensor.
b: Second LabeledTensor.
name: Optional op name.
Returns:
LabeledTensor with the result of matrix multiplication. Axes are ordered by
    the current axis_order_scope, if set, or in order of appearance on the
inputs.
Raises:
NotImplementedError: If inputs have rank >2 or share multiple axes.
ValueError: If the inputs have rank 0 or do not share any axes.
"""
with ops.name_scope(name, 'lt_matmul', [a, b]) as scope:
a = core.convert_to_labeled_tensor(a)
b = core.convert_to_labeled_tensor(b)
if len(a.axes) > 2 or len(b.axes) > 2:
# We could pass batched inputs to tf.matmul to make this work, but we
# would also need to use tf.tile and/or tf.transpose. These are more
# expensive than doing reshapes, so it's not clear if it's a good idea to
# do this automatically.
raise NotImplementedError(
'matmul currently requires inputs with rank 2 or less, but '
'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))
if not a.axes or not b.axes:
raise ValueError(
'matmul currently requires inputs with at least rank 1, but '
'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))
shared_axes = set(a.axes) & set(b.axes)
if len(shared_axes) > 1:
raise NotImplementedError(
'matmul does not yet support summing over multiple shared axes: %r. '
'Use transpose and reshape to create a single shared axis to sum '
'over.' % shared_axes)
if not shared_axes:
      raise ValueError('there must be exactly one axis in common between the '
                       'inputs to matmul: %r, %r' %
(a.axes.keys(), b.axes.keys()))
shared_axis, = shared_axes
if a.axes[shared_axis] != b.axes[shared_axis]:
raise ValueError('axis %r does not match on input arguments: %r vs %r' %
(shared_axis, a.axes[shared_axis].value,
b.axes[shared_axis].value))
result_axes = []
for axes in [a.axes, b.axes]:
for axis in axes.values():
if axis.name != shared_axis:
result_axes.append(axis)
axis_scope_order = core.get_axis_order()
if axis_scope_order is not None:
result_axis_names = [axis.name for axis in result_axes]
new_axis_names = [
name for name in axis_scope_order if name in result_axis_names
]
if new_axis_names != result_axis_names:
# switch a and b
b, a = a, b
# result_axes is a list of length 1 or 2
result_axes = result_axes[::-1]
squeeze_dims = []
if len(a.axes) == 1:
a_tensor = array_ops.reshape(a.tensor, (1, -1))
squeeze_dims.append(0)
transpose_a = False
else:
a_tensor = a.tensor
transpose_a = list(a.axes.keys()).index(shared_axis) == 0
if len(b.axes) == 1:
b_tensor = array_ops.reshape(b.tensor, (-1, 1))
squeeze_dims.append(1)
transpose_b = False
else:
b_tensor = b.tensor
transpose_b = list(b.axes.keys()).index(shared_axis) == 1
result_op = math_ops.matmul(
a_tensor, b_tensor, transpose_a=transpose_a, transpose_b=transpose_b)
if squeeze_dims:
result_op = array_ops.squeeze(result_op, squeeze_dims)
result_op = array_ops.identity(result_op, name=scope)
return core.LabeledTensor(result_op, result_axes)
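# Sketch (illustrative): matmul contracts over the single shared axis.
#   For m with axes [('row', 2), ('inner', 3)] and v with axes [('inner', 3)]:
#   matmul(m, v)   # matrix-vector product, result has axes [('row', 2)]
#   matmul(v, v)   # vector dot product, result is a scalar LabeledTensor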
@tc.returns(types.FunctionType)
@tc.accepts(string_types, collections.Callable)
def define_reduce_op(op_name, reduce_fn):
"""Define a reduction op for labeled tensors.
Args:
op_name: string name of the TensorFlow op.
reduce_fn: function to call to evaluate the op on a tf.Tensor.
Returns:
Function defining the given reduction op that acts on a LabeledTensor.
"""
default_name = 'lt_%s' % op_name
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, ReduceAxes, tc.Optional(string_types))
def op(labeled_tensor, axes=None, name=None):
"""Computes the given reduction across the given axes of a LabeledTensor.
See `tf.{op_name}` for full details.
Args:
labeled_tensor: The input tensor.
axes: A set of axes or None.
If None, all axes will be reduced.
Axes must all be strings, in which case those dimensions will be
removed, or pairs of (name, None) or (name, label), in which case those
dimensions will be kept.
name: Optional op name.
Returns:
The reduced LabeledTensor.
Raises:
ValueError: if any of the axes to reduce over are not found on
`labeled_tensor`.
"""
with ops.name_scope(name, default_name, [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if axes is None:
axes = labeled_tensor.axes.keys()
if isinstance(axes, (string_types, tuple)):
axes = [axes]
reduction_axes = {}
axes_to_squeeze = []
for a in axes:
if isinstance(a, string_types):
# We squeeze out this axis.
reduction_axes[a] = a
axes_to_squeeze.append(a)
else:
# We keep this axis, with the user-provided labels.
(axis_name, label) = a
if label is not None:
# The input was a single label, so make it a list so it can be
# turned into an Axis.
label = [label]
reduction_axes[axis_name] = (axis_name, label)
for axis_name in reduction_axes:
if axis_name not in labeled_tensor.axes:
raise ValueError('Axis %s not in axes %s' %
(axis_name, labeled_tensor.axes))
intermediate_axes = []
reduction_dimensions = []
for i, axis in enumerate(labeled_tensor.axes.values()):
if axis.name in reduction_axes:
intermediate_axes.append(reduction_axes[axis.name])
reduction_dimensions.append(i)
else:
intermediate_axes.append(axis)
reduce_op = reduce_fn(
labeled_tensor.tensor, reduction_dimensions, keep_dims=True)
reduce_lt = core.LabeledTensor(reduce_op, intermediate_axes)
return squeeze(reduce_lt, axes_to_squeeze, name=scope)
op.__doc__ = op.__doc__.format(op_name=op_name)
op.__name__ = op_name
return op
reduce_all = define_reduce_op('reduce_all', math_ops.reduce_all)
reduce_any = define_reduce_op('reduce_any', math_ops.reduce_any)
reduce_logsumexp = define_reduce_op('reduce_logsumexp',
math_ops.reduce_logsumexp)
reduce_max = define_reduce_op('reduce_max', math_ops.reduce_max)
reduce_mean = define_reduce_op('reduce_mean', math_ops.reduce_mean)
reduce_min = define_reduce_op('reduce_min', math_ops.reduce_min)
reduce_prod = define_reduce_op('reduce_prod', math_ops.reduce_prod)
reduce_sum = define_reduce_op('reduce_sum', math_ops.reduce_sum)
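# Sketch (illustrative): reductions remove axes given as plain strings and keep
# a size-1 axis for (name, label) pairs.
#   For lt_in with axes [('batch', 4), ('channel', ['r', 'g', 'b'])]:
#   reduce_mean(lt_in, 'batch')          # removes the 'batch' axis
#   reduce_mean(lt_in, ('batch', None))  # reduces but keeps a size-1 'batch'
#   reduce_sum(lt_in)                    # reduces over all axes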
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(str, tc.Union(int, ops.Tensor)),
tc.Optional(string_types))
def tile(labeled_tensor, multiples, name=None):
"""Constructs a tensor by tiling a given tensor.
Only axes without tick-labels can be tiled. (Otherwise, axis labels on tiled
tensors would no longer be unique.)
  See tf.tile.
Args:
labeled_tensor: The input tensor.
multiples: A mapping where the keys are axis names and the values are the
integer number of times to tile along that axis. Only axes with a multiple
different than 1 need be included.
name: Optional op name.
Returns:
A tensor with the indicated axes tiled.
Raises:
ValueError: If the tiled axes are not axes in the input tensor, or if any
axes in multiples have tick labels.
"""
with ops.name_scope(name, 'lt_tile', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if not set(multiples.keys()) <= set(labeled_tensor.axes.keys()):
raise ValueError('tile axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(multiples.keys(), labeled_tensor.axes))
labeled_axes = [
name for name in multiples
if labeled_tensor.axes[name].labels is not None
]
if labeled_axes:
raise ValueError('cannot tile axes with tick labels: %r' % labeled_axes)
multiples_list = [multiples.get(name, 1) for name in labeled_tensor.axes]
tile_op = array_ops.tile(labeled_tensor.tensor, multiples_list, name=scope)
new_axes = [
axis.name if axis.labels is None else axis
for axis in labeled_tensor.axes.values()
]
return core.LabeledTensor(tile_op, new_axes)
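# Sketch (illustrative): only axes without tick labels can be tiled.
#   For lt_in with axes [('batch', 2), ('channel', ['r', 'g'])]:
#   tile(lt_in, {'batch': 3})   # 'batch' grows to size 6; 'channel' unchanged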
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(str, tc.Tuple(core.AxisValue, core.AxisValue)),
string_types, tc.Optional(string_types))
def pad(labeled_tensor, paddings, mode='CONSTANT', name=None):
"""Pads a tensor.
See tf.pad.
Args:
labeled_tensor: The input tensor.
paddings: A mapping where the keys are axis names and the values are
tuples where the first element is the padding to insert at the beginning
of the axis and the second is the padding to insert at the end of the
axis.
mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC".
name: Optional op name.
Returns:
A tensor with the indicated axes padded, optionally with those axes extended
with the provided labels.
Raises:
ValueError: If the padded axes are not axes in the input tensor.
"""
with ops.name_scope(name, 'lt_pad', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if not set(paddings.keys()) <= set(labeled_tensor.axes.keys()):
raise ValueError('pad axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(paddings.keys(), labeled_tensor.axes))
new_axes = []
padding_pairs = []
for name, axis in labeled_tensor.axes.items():
if name in paddings:
padding_before, padding_after = paddings[name]
axis_before = core.Axis(name, padding_before)
axis_after = core.Axis(name, padding_after)
new_axes.append(core.concat_axes([axis_before, axis, axis_after]))
padding_pairs.append((len(axis_before), len(axis_after)))
else:
new_axes.append(axis)
padding_pairs.append((0, 0))
pad_op = array_ops.pad(labeled_tensor.tensor,
padding_pairs,
mode,
name=scope)
return core.LabeledTensor(pad_op, new_axes)
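# Sketch (illustrative): padding lengthens the named axes.
#   For lt_in with axes [('x', 4)]:
#   pad(lt_in, {'x': (1, 2)})             # zero padding, 'x' becomes size 7
#   pad(lt_in, {'x': (1, 2)}, 'REFLECT')  # reflective padding, same new size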
@tc.returns(core.LabeledTensor)
@tc.accepts(
tc.Union(np.ndarray, list, tuple, core.Scalar),
tc.Optional(dtypes.DType),
tc.Optional(
tc.Union(core.Axes, tc.Collection(
tc.Union(string_types, core.AxisLike)))), tc.Optional(string_types))
def constant(value, dtype=None, axes=None, name=None):
"""Creates a constant tensor.
If `axes` includes any strings, shape is inferred from `value`. Otherwise,
the sizes of the given `axes` are used to set `shape` for `tf.constant`.
See tf.constant for more details.
Args:
value: The input tensor.
dtype: The type of the returned tensor.
axes: Optional Axes, list of strings or list of objects coercible to Axis
objects. By default, axes are assumed to be an empty list (i.e., `value`
is treated as a scalar).
name: Optional op name.
Returns:
    The constant LabeledTensor with the given value.
"""
with ops.name_scope(name, 'lt_constant', [value]) as scope:
if axes is None:
axes = []
if isinstance(axes, core.Axes):
axes = axes.values()
if any(isinstance(ax, string_types) for ax in axes):
# need to infer shape
shape = None
else:
# axes already indicate shape
axes = [core.as_axis(a) for a in axes]
shape = [a.size for a in axes]
op = array_ops.constant(value, dtype=dtype, shape=shape, name=scope)
return core.LabeledTensor(op, axes)
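# Sketch (illustrative): shape is inferred when any axis is given as a string.
#   constant([[1, 2], [3, 4]], axes=['row', ('col', ['a', 'b'])])
#   constant(0.0, axes=[('x', 3)])   # sizes taken from the axes themselves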
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(dtypes.DType), tc.Optional(string_types))
def zeros_like(labeled_tensor, dtype=None, name=None):
"""Creates an identical tensor with all elements set to zero.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
The tensor with elements set to zero.
"""
with ops.name_scope(name, 'lt_zeros_like', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = array_ops.zeros_like(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(dtypes.DType), tc.Optional(string_types))
def ones_like(labeled_tensor, dtype=None, name=None):
"""Creates an identical tensor with all elements set to one.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
The tensor with elements set to one.
"""
with ops.name_scope(name, 'lt_ones_like', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = array_ops.ones_like(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(dtypes.DType), tc.Optional(string_types))
def cast(labeled_tensor, dtype=None, name=None):
"""Casts a labeled tensor to a new type.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
A labeled tensor with the new dtype.
"""
with ops.name_scope(name, 'lt_cast', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = math_ops.cast(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, tc.Optional(string_types))
def verify_tensor_all_finite(labeled_tensor, message, name=None):
"""Asserts a tensor doesn't contain NaNs or Infs.
See tf.verify_tensor_all_finite.
Args:
labeled_tensor: The input tensor.
message: Message to log on failure.
name: Optional op name.
Returns:
The input tensor.
"""
with ops.name_scope(name, 'lt_verify_tensor_all_finite',
[labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = numerics.verify_tensor_all_finite(
labeled_tensor.tensor, msg=message, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
tc.Optional(string_types))
def boolean_mask(labeled_tensor, mask, name=None):
"""Apply a boolean mask to a labeled tensor.
Unlike `tf.boolean_mask`, this currently only works on 1-dimensional masks.
The mask is applied to the first axis of `labeled_tensor`. Labels on the first
  axis are removed, because the True indices in `mask` are generally not known
  statically.
Args:
labeled_tensor: The input tensor.
    mask: A 1D boolean LabeledTensor matching the first axis of the input.
name: Optional op name.
Returns:
The masked labeled tensor.
Raises:
    ValueError: If the first axis of the mask does not match the first axis of
      the labeled tensor.
"""
with ops.name_scope(name, 'lt_boolean_mask', [labeled_tensor, mask]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
mask = core.convert_to_labeled_tensor(mask)
if len(mask.axes) > 1:
raise NotImplementedError(
"LabeledTensor's boolean_mask currently only supports 1D masks")
mask_axis = list(mask.axes.values())[0]
lt_axis = list(labeled_tensor.axes.values())[0]
if mask_axis != lt_axis:
raise ValueError('the first axis of the labeled tensor and the mask '
'are not equal:\n%r\n%r' % (lt_axis, mask_axis))
op = array_ops.boolean_mask(labeled_tensor.tensor, mask.tensor, name=scope)
# TODO(shoyer): attempt to infer labels for the masked values, by calling
# tf.contrib.util.constant_value on the mask?
axes = [lt_axis.name] + list(labeled_tensor.axes.values())[1:]
return core.LabeledTensor(op, axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
core.LabeledTensorLike, tc.Optional(string_types))
def where(condition, x, y, name=None):
"""Return elements from x or y depending on condition.
See `tf.where` for more details. This function currently only implements the
three argument version of where.
Args:
condition: LabeledTensor of type `bool`.
x: LabeledTensor for values where condition is true.
y: LabeledTensor for values where condition is false.
name: Optional op name.
Returns:
The labeled tensor with values according to condition.
Raises:
    ValueError: if `condition`, `x` and `y` do not all have identical axes.
"""
with ops.name_scope(name, 'lt_where', [condition, x, y]) as scope:
condition = core.convert_to_labeled_tensor(condition)
x = core.convert_to_labeled_tensor(x)
y = core.convert_to_labeled_tensor(y)
if not condition.axes == x.axes == y.axes:
raise ValueError('all inputs to `where` must have equal axes')
op = array_ops.where(condition.tensor, x.tensor, y.tensor, name=scope)
return core.LabeledTensor(op, x.axes)
|
apache-2.0
|
lukw00/shogun
|
examples/undocumented/python_modular/graphical/interactive_gp_demo.py
|
16
|
14207
|
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Written (C) 2012 Heiko Strathmann, based on interactive_svm_demo by Christian
# Widmer which itself is based on PyQT Demo by Eli Bendersky
#
"""
Shogun Gaussian processes demo based on interactive SVM demo by Christian \
Widmer and Soeren Sonnenburg which itself is based on PyQT Demo by Eli Bendersky
Work to be done on parameter (e.g. kernel width) optimization.
Heiko Strathmann/Cameron Lai
License: GPLv3
"""
import sys, os, csv
import scipy as SP
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from numpy import *
import matplotlib
from matplotlib import mpl
from matplotlib.colorbar import make_axes, Colorbar
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
from modshogun import *
import util
class Form(QMainWindow):
def __init__(self, parent=None):
super(Form, self).__init__(parent)
self.setWindowTitle('SHOGUN interactive demo')
self.series_list_model = QStandardItemModel()
self.create_menu()
self.create_main_frame()
self.create_status_bar()
self.create_toy_data()
self.on_show()
def on_show(self):
self.axes.clear()
self.axes.plot(self.x, self.y, 'ro')
self.axes.set_xlim((self.xmin,self.xmax))
self.axes.set_ylim((self.ymin,self.ymax))
self.axes.grid(True)
self.canvas.draw()
self.fill_series_list(self.get_stats())
def on_about(self):
msg = __doc__
QMessageBox.about(self, "About the demo", msg.strip())
def fill_series_list(self, names):
self.series_list_model.clear()
for name in names:
item = QStandardItem(name)
item.setCheckState(Qt.Unchecked)
item.setCheckable(False)
self.series_list_model.appendRow(item)
def onclick(self, event):
print 'button=%d, x=%d, y=%d, xdata=%f, ydata=%f'%(event.button, event.x, event.y, event.xdata, event.ydata)
x=SP.append(self.x, event.xdata)
self.y=SP.append(self.y, event.ydata)
self.x= x[:,SP.newaxis]
self.on_show()
self.status_text.setText("New data point: x=%f, y=%f"%(event.xdata, event.ydata))
def create_menu(self):
self.file_menu = self.menuBar().addMenu("&File")
#load_action = self.create_action("&Load file",
# shortcut="Ctrl+L", slot=self.load_file, tip="Load a file")
quit_action = self.create_action("&Quit", slot=self.close,
shortcut="Ctrl+Q", tip="Close the application")
#self.add_actions(self.file_menu,
# (load_action, None, quit_action))
self.help_menu = self.menuBar().addMenu("&Help")
about_action = self.create_action("&About",
shortcut='F1', slot=self.on_about,
tip='About the demo')
self.add_actions(self.help_menu, (about_action,))
def clear_data(self):
self.x=SP.array([])
self.y=SP.array([])
self.xmin=-5
self.xmax=5
self.ymin=-5
self.ymax=5
self.on_show()
self.status_text.setText("Data cleared")
def enable_widgets(self):
kernel_name = self.kernel_combo.currentText()
if kernel_name == "Linear":
self.sigma.setDisabled(True)
self.degree.setDisabled(True)
elif kernel_name == "Polynomial":
self.sigma.setDisabled(True)
self.degree.setEnabled(True)
elif kernel_name == "Gaussian":
self.sigma.setEnabled(True)
self.degree.setDisabled(True)
def get_stats(self):
num_train = len(self.x)
str_train = "num training points: %i" % num_train
str_test = "num training points: %s" % self.nTest.text()
return (str_train, str_test)
def create_toy_data(self):
#0. generate Toy-Data; just samples from a superposition of a sin + linear trend
x = SP.arange(self.xmin,self.xmax,(self.xmax-self.xmin)/100.0)
C = 2 #offset
b = 0
y = b*x + C + float(self.sine_amplitude.text())*SP.sin(float(self.sine_freq.text())*x)
# dy = b + 1*SP.cos(x)
y += float(self.noise_level.text())*random.randn(y.shape[0])
self.y=y-y.mean()
self.x= x[:,SP.newaxis]
self.on_show()
def learn_kernel_width(self):
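        # NOTE: this routine references feat_train, labels, likelihood, SECF,
        # inf and gp, which are only constructed inside plot_gp(); as the
        # module docstring says, the kernel-width optimization is still work
        # in progress, and its button is commented out of the layout below.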
root=ModelSelectionParameters();
c1=ModelSelectionParameters("inference_method", inf);
root.append_child(c1);
c2 = ModelSelectionParameters("scale");
c1.append_child(c2);
c2.build_values(0.01, 4.0, R_LINEAR);
c3 = ModelSelectionParameters("likelihood_model", likelihood);
c1.append_child(c3);
c4=ModelSelectionParameters("sigma");
c3.append_child(c4);
c4.build_values(0.001, 4.0, R_LINEAR);
c5 =ModelSelectionParameters("kernel", SECF);
c1.append_child(c5);
c6 =ModelSelectionParameters("width");
c5.append_child(c6);
c6.build_values(0.001, 4.0, R_LINEAR);
crit = GradientCriterion();
grad=GradientEvaluation(gp, feat_train, labels, crit);
grad.set_function(inf);
gp.print_modsel_params();
root.print_tree();
grad_search=GradientModelSelection(root, grad);
grad.set_autolock(0);
best_combination=grad_search.select_model(1);
self.sigma.setText("1.0")
self.plot_gp()
def plot_gp(self):
feat_train = RealFeatures(self.x.T)
labels = RegressionLabels(self.y)
#[x,y]=self.data.get_data()
#feat_train=RealFeatures(x.T)
#labels=RegressionLabels(y)
n_dimensions = 1
kernel_name = self.kernel_combo.currentText()
print "current kernel is %s" % (kernel_name)
#new interface with likelihood parametres being decoupled from the covaraince function
likelihood = GaussianLikelihood()
#covar_parms = SP.log([2])
#hyperparams = {'covar':covar_parms,'lik':SP.log([1])}
# construct covariance function
width=float(self.sigma.text())
degree=int(self.degree.text())
if kernel_name == "Linear":
gk = LinearKernel(feat_train, feat_train)
gk.set_normalizer(IdentityKernelNormalizer())
elif kernel_name == "Polynomial":
gk = PolyKernel(feat_train, feat_train, degree, True)
gk.set_normalizer(IdentityKernelNormalizer())
elif kernel_name == "Gaussian":
gk = GaussianKernel(feat_train, feat_train, width)
#SECF = GaussianKernel(feat_train, feat_train, width)
#covar = SECF
zmean = ZeroMean();
inf = ExactInferenceMethod(gk, feat_train, zmean, labels, likelihood);
inf.get_negative_marginal_likelihood()
# location of unispaced predictions
        x_test = array([linspace(self.xmin, self.xmax, int(self.nTest.text()))])  # cast the text field to int for linspace
feat_test=RealFeatures(x_test)
gp = GaussianProcessRegression(inf)
gp.train()
covariance = gp.get_variance_vector(feat_test)
predictions = gp.get_mean_vector(feat_test)
#print "x_test"
#print feat_test.get_feature_matrix()
#print "mean predictions"
#print predictions.get_labels()
#print "covariances"
#print covariance.get_labels()
self.status_text.setText("Negative Log Marginal Likelihood = %f"%(inf.get_negative_marginal_likelihood()))
self.axes.clear()
self.axes.grid(True)
self.axes.set_xlim((self.xmin,self.xmax))
self.axes.set_ylim((self.ymin,self.ymax))
self.axes.hold(True)
x_test=feat_test.get_feature_matrix()[0]
self.axes.plot(x_test, predictions, 'b-x')
#self.axes.plot(x_test, labels.get_labels(), 'ro')
self.axes.plot(self.x, self.y, 'ro')
#self.axes.plot(feat_test.get_feature_matrix()[0], predictions.get_labels()-3*sqrt(covariance.get_labels()))
#self.axes.plot(feat_test.get_feature_matrix()[0], predictions.get_labels()+3*sqrt(covariance.get_labels()))
upper = predictions+3*sqrt(covariance)
lower = predictions-3*sqrt(covariance)
self.axes.fill_between(x_test, lower, upper, color='grey')
self.axes.hold(False)
self.canvas.draw()
self.fill_series_list(self.get_stats())
def create_main_frame(self):
self.xmin=-5
self.xmax=5
self.ymin=-5
self.ymax=5
self.main_frame = QWidget()
plot_frame = QWidget()
self.dpi = 100
self.fig = Figure((6.0, 6.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.main_frame)
cid = self.canvas.mpl_connect('button_press_event', self.onclick)
self.axes = self.fig.add_subplot(111)
self.cax = None
#self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
self.kernel_combo = QComboBox()
self.kernel_combo.insertItem(-1, "Gaussian")
self.kernel_combo.insertItem(-1, "Polynomial")
self.kernel_combo.insertItem(-1, "Linear")
self.kernel_combo.maximumSize = QSize(300, 50)
self.connect(self.kernel_combo, SIGNAL("currentIndexChanged(QString)"), self.enable_widgets)
log_label = QLabel("Data points")
self.series_list_view = QListView()
self.series_list_view.setModel(self.series_list_model)
self.sine_freq = QLineEdit()
self.sine_freq.setText("1.0")
self.sine_amplitude = QLineEdit()
self.sine_amplitude.setText("1.0")
self.sigma = QLineEdit()
self.sigma.setText("1.2")
self.degree = QLineEdit()
self.degree.setText("2")
self.noise_level = QLineEdit()
self.noise_level.setText("1")
self.nTest = QLineEdit()
self.nTest.setText("100")
spins_hbox = QHBoxLayout()
spins_hbox.addWidget(QLabel('Sine data setting: '))
spins_hbox.addWidget(QLabel('Sine Freq.'))
spins_hbox.addWidget(self.sine_freq)
spins_hbox.addWidget(QLabel('Sine Amplitude'))
spins_hbox.addWidget(self.sine_amplitude)
spins_hbox.addWidget(QLabel('Noise Level'))
spins_hbox.addWidget(self.noise_level)
spins_hbox.addStretch(1)
spins_hbox2 = QHBoxLayout()
spins_hbox2.addWidget(QLabel('Kernel Setting: '))
spins_hbox2.addWidget(QLabel('Type'))
spins_hbox2.addWidget(self.kernel_combo)
spins_hbox2.addWidget(QLabel("Width"))
spins_hbox2.addWidget(self.sigma)
spins_hbox2.addWidget(QLabel("Degree"))
spins_hbox2.addWidget(self.degree)
spins_hbox2.addStretch(1)
spins_hbox3 = QHBoxLayout()
spins_hbox3.addWidget(QLabel('Test Setting: '))
spins_hbox3.addWidget(QLabel('Number of test points'))
spins_hbox3.addWidget(self.nTest)
spins_hbox3.addStretch(1)
self.show_button = QPushButton("&Train GP")
self.connect(self.show_button, SIGNAL('clicked()'), self.plot_gp)
self.gen_sine_data_button = QPushButton("&Generate Sine Data")
self.connect(self.gen_sine_data_button, SIGNAL('clicked()'), self.create_toy_data)
self.clear_data_button = QPushButton("&Clear")
self.connect(self.clear_data_button, SIGNAL('clicked()'), self.clear_data)
self.learn_kernel_button = QPushButton("&Learn Kernel Width and train GP")
self.connect(self.learn_kernel_button, SIGNAL('clicked()'), self.learn_kernel_width)
left_vbox = QVBoxLayout()
left_vbox.addWidget(self.canvas)
#left_vbox.addWidget(self.mpl_toolbar)
right0_vbox = QVBoxLayout()
right0_vbox.addWidget(QLabel("Data Points"))
right0_vbox.addWidget(self.series_list_view)
#right0_vbox.addWidget(self.legend_cb)
right0_vbox.addStretch(1)
right2_vbox = QVBoxLayout()
right2_vbox.addWidget(QLabel("Settings"))
right2_vbox.addWidget(self.gen_sine_data_button)
right2_vbox.addWidget(self.clear_data_button)
right2_vbox.addWidget(self.show_button)
#right2_vbox.addWidget(self.learn_kernel_button)
right2_vbox.addLayout(spins_hbox)
right2_vbox.addLayout(spins_hbox2)
right2_vbox.addLayout(spins_hbox3)
right2_vbox.addStretch(1)
right_vbox = QHBoxLayout()
right_vbox.addLayout(right0_vbox)
right_vbox.addLayout(right2_vbox)
hbox = QVBoxLayout()
hbox.addLayout(left_vbox)
hbox.addLayout(right_vbox)
self.main_frame.setLayout(hbox)
self.setCentralWidget(self.main_frame)
self.enable_widgets()
def create_status_bar(self):
self.status_text = QLabel("")
self.statusBar().addWidget(self.status_text, 1)
def add_actions(self, target, actions):
for action in actions:
if action is None:
target.addSeparator()
else:
target.addAction(action)
def create_action( self, text, slot=None, shortcut=None,
icon=None, tip=None, checkable=False,
signal="triggered()"):
action = QAction(text, self)
if icon is not None:
action.setIcon(QIcon(":/%s.png" % icon))
if shortcut is not None:
action.setShortcut(shortcut)
if tip is not None:
action.setToolTip(tip)
action.setStatusTip(tip)
if slot is not None:
self.connect(action, SIGNAL(signal), slot)
if checkable:
action.setCheckable(True)
return action
def main():
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
if __name__ == "__main__":
main()
|
gpl-3.0
|
brianlorenz/COSMOS_IMACS_Redshifts
|
PlotCodes/Plot_AvStruct1prop.py
|
1
|
5909
|
#Plots Av (dust attenuation) against a structural property (here the axis ratio) for objects with good, low, and bad line measurements, split at LMASS = 9.5
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import ascii
import sys, os, string
import pandas as pd
from astropy.io import fits
import collections
from astropy.cosmology import WMAP9 as cosmo
from astropy.stats import biweight_midvariance
#Folder to save the figures
figout = '/Users/blorenz/COSMOS/Reports/2018/Images/'
#The location with the file for all of our data
fluxdatapath = '/Users/blorenz/COSMOS/COSMOSData/lineflux_red.txt'
#Location of the equivalent width data
ewdata = '/Users/blorenz/COSMOS/COSMOSData/lineew.txt'
#Read in the ew of the lines
ew_df = ascii.read(ewdata).to_pandas()
#The location to store the scale and its stddev of each line
qualdatapath = '/Users/blorenz/COSMOS/COSMOSData/dataqual.txt'
#Read in the scale of the lines
dataqual = ascii.read(qualdatapath).to_pandas()
d = {'True': True, 'False': False}
#File with the error array
errdatapath = '/Users/blorenz/COSMOS/COSMOSData/errs.txt'
#Read in the scale of the lines
err_df = ascii.read(errdatapath,data_start=1,header_start=0,format='csv').to_pandas()
#Read the datafile:
fluxdata = ascii.read(fluxdatapath).to_pandas()
#File with the structural properties
spropdatapath = '/Users/blorenz/COSMOS/COSMOSData/struct_prop.txt'
#Read in the scale of the lines
sprop_df = ascii.read(spropdatapath).to_pandas()
sprop_df = sprop_df.rename(columns={'id':'OBJID'})
fluxdata = pd.merge(fluxdata,sprop_df)
#Fontsizes for plotting
axisfont = 24
ticksize = 18
ticks = 8
titlefont = 24
legendfont = 16
textfont = 16
#Division function
def divz(X,Y):
return X/np.where(Y,Y,Y+1)*np.not_equal(Y,0)
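# divz is a guarded element-wise division: wherever Y is zero the result is set
# to 0 rather than inf/nan, e.g. divz(np.array([1.,2.]), np.array([2.,0.]))
# returns array([0.5, 0.]).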
lines=['6563_fix','4861']
#Filter the data
goodlines = [dataqual[line+'_good'].map(d) for line in lines]
#Needs to be good in all lines to be good
allgood = np.logical_and.reduce(goodlines)
#Needs to be bad in any line to be bad
badlines = [dataqual[line+'_bad'].map(d) for line in lines]
baddata = np.logical_or.reduce(badlines)
lowlines = [dataqual[line+'_low'].map(d) for line in lines]
#Needs to be low in any line to be low, and also not bad in a line
somelow = np.logical_and(np.logical_or.reduce(lowlines),np.logical_not(baddata))
fig,axarr = plt.subplots(2,3,figsize=(24,15),sharex=True,sharey=True)
axarr = np.reshape(axarr,6)
#Gets rid of objects with bad ellipticities
filtar = fluxdata['ar']>0
#Plot the data with error bars
#Counter
c = 0
plotdata = 'ar'
ylabel = 'Axis Ratio'
savename = 'AxisRatio'
#fluxdata['n']=np.log10(fluxdata['n'])
#fluxdata['SMD']=np.log10(divz(fluxdata['LMASS'],(4*np.pi*fluxdata['re_kpc']**2)))
ms=12
lwbw=2
for ax in axarr:
if c in [0,1,2]:
massfilt = fluxdata['LMASS']<9.5
else:
massfilt = fluxdata['LMASS']>=9.5
if c in [0,3]:
col = 'good'
filt = allgood
color='blue'
elif c in [1,4]:
col = 'low'
filt = somelow
color='orange'
else:
col = 'bad'
filt = baddata
color='red'
#ax.errorbar(fluxdata[filt][filtar]['av'],fluxdata[filt][filtar]['ar'],xerr=fluxdata[filt][filtar]['dav1'],color=color,marker='o',ms=4,lw=0.5,ls='None')
#ax2.errorbar(fluxdata[filt][filtar]['av'],fluxdata[filt][filtar]['re_kpc'],xerr=fluxdata[filt][filtar]['dav1'],color=color,marker='o',ms=4,lw=0.5,ls='None')
#Titles, axes, legends
acount = 0
filttype = (fluxdata[plotdata]>-98.9)
if c==0:
ax.set_ylabel(ylabel+', LMASS < 9.5',fontsize = axisfont)
if c==3:
ax.set_ylabel(ylabel+', LMASS >= 9.5',fontsize = axisfont)
ax.set_xlabel('Av (mag)',fontsize = axisfont)
ax.tick_params(labelsize = ticksize, size=ticks)
filters = np.logical_and(filt,massfilt)
filters = np.logical_and(filters,filttype)
ax.errorbar(fluxdata[filters]['av'],fluxdata[filters][plotdata],xerr=fluxdata[filters]['dav1'],color=color,marker='o',ms=4,lw=0.5,ls='None')
    print(len(fluxdata[filters]))
if c in [0,3]:
        mr1 = (fluxdata[filters]['ar']<0.25)  # same filtered sample as mr2-mr4 so the boolean index aligns
mr2 = np.logical_and(fluxdata[filters]['ar']>=0.25,fluxdata[filters]['ar']<0.5)
mr3 = np.logical_and(fluxdata[filters]['ar']>=0.5,fluxdata[filters]['ar']<0.75)
mr4 = (fluxdata[filters]['ar']>=0.75)
med1 = np.median(fluxdata[filters][mr1].av)
med2 = np.median(fluxdata[filters][mr2].av)
med3 = np.median(fluxdata[filters][mr3].av)
med4 = np.median(fluxdata[filters][mr4].av)
med751 = np.percentile(fluxdata[filters][mr1].av,75)
med752 = np.percentile(fluxdata[filters][mr2].av,75)
med753 = np.percentile(fluxdata[filters][mr3].av,75)
med754 = np.percentile(fluxdata[filters][mr4].av,75)
emed1 = np.sqrt(biweight_midvariance(fluxdata[filters][mr1].av))
emed2 = np.sqrt(biweight_midvariance(fluxdata[filters][mr2].av))
emed3 = np.sqrt(biweight_midvariance(fluxdata[filters][mr3].av))
emed4 = np.sqrt(biweight_midvariance(fluxdata[filters][mr4].av))
ax.errorbar(med1,0.125,xerr=emed1,color='black',marker='o',ms=ms,lw=lwbw,ls='None')
ax.errorbar(med2,0.375,xerr=emed2,color='black',marker='o',ms=ms,lw=lwbw,ls='None')
ax.errorbar(med3,0.625,xerr=emed3,color='black',marker='o',ms=ms,lw=lwbw,ls='None')
ax.errorbar(med4,0.875,xerr=emed4,color='black',marker='o',ms=ms,lw=lwbw,ls='None')
ax.errorbar(med751,0.125,xerr=emed1,color='red',marker='o',ms=ms,lw=lwbw,ls='None')
ax.errorbar(med752,0.375,xerr=emed2,color='red',marker='o',ms=ms,lw=lwbw,ls='None')
ax.errorbar(med753,0.625,xerr=emed3,color='red',marker='o',ms=ms,lw=lwbw,ls='None')
ax.errorbar(med754,0.875,xerr=emed4,color='red',marker='o',ms=ms,lw=lwbw,ls='None')
ax.set_xlim(-0.5,5)
ax.set_ylim(0,1)
c = c+1
fig.tight_layout()
fig.savefig(figout + 'Av_'+savename+'_mass.pdf')
plt.close(fig)
|
mit
|
jseabold/statsmodels
|
statsmodels/genmod/tests/test_bayes_mixed_glm.py
|
5
|
18166
|
import numpy as np
from statsmodels.genmod.bayes_mixed_glm import (BinomialBayesMixedGLM,
PoissonBayesMixedGLM)
import pandas as pd
from scipy import sparse
from numpy.testing import assert_allclose, assert_equal
from scipy.optimize import approx_fprime
def gen_simple_logit(nc, cs, s):
np.random.seed(3799)
exog_vc = np.kron(np.eye(nc), np.ones((cs, 1)))
exog_fe = np.random.normal(size=(nc * cs, 2))
vc = s * np.random.normal(size=nc)
lp = np.dot(exog_fe, np.r_[1, -1]) + np.dot(exog_vc, vc)
pr = 1 / (1 + np.exp(-lp))
y = 1 * (np.random.uniform(size=nc * cs) < pr)
ident = np.zeros(nc, dtype=int)
return y, exog_fe, exog_vc, ident
def gen_simple_poisson(nc, cs, s):
np.random.seed(3799)
exog_vc = np.kron(np.eye(nc), np.ones((cs, 1)))
exog_fe = np.random.normal(size=(nc * cs, 2))
vc = s * np.random.normal(size=nc)
lp = np.dot(exog_fe, np.r_[0.1, -0.1]) + np.dot(exog_vc, vc)
r = np.exp(lp)
y = np.random.poisson(r)
ident = np.zeros(nc, dtype=int)
return y, exog_fe, exog_vc, ident
def gen_crossed_logit(nc, cs, s1, s2):
np.random.seed(3799)
a = np.kron(np.eye(nc), np.ones((cs, 1)))
b = np.kron(np.ones((cs, 1)), np.eye(nc))
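# 'a' puts each block of cs consecutive rows in one of the nc groups, while 'b'
# cycles through the nc groups row by row, so the two sets of random effects
# are crossed rather than nested.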
exog_vc = np.concatenate((a, b), axis=1)
exog_fe = np.random.normal(size=(nc * cs, 1))
vc = s1 * np.random.normal(size=2 * nc)
vc[nc:] *= s2 / s1
lp = np.dot(exog_fe, np.r_[-0.5]) + np.dot(exog_vc, vc)
pr = 1 / (1 + np.exp(-lp))
y = 1 * (np.random.uniform(size=nc * cs) < pr)
ident = np.zeros(2 * nc, dtype=int)
ident[nc:] = 1
return y, exog_fe, exog_vc, ident
def gen_crossed_poisson(nc, cs, s1, s2):
np.random.seed(3799)
a = np.kron(np.eye(nc), np.ones((cs, 1)))
b = np.kron(np.ones((cs, 1)), np.eye(nc))
exog_vc = np.concatenate((a, b), axis=1)
exog_fe = np.random.normal(size=(nc * cs, 1))
vc = s1 * np.random.normal(size=2 * nc)
vc[nc:] *= s2 / s1
lp = np.dot(exog_fe, np.r_[-0.5]) + np.dot(exog_vc, vc)
r = np.exp(lp)
y = np.random.poisson(r)
ident = np.zeros(2 * nc, dtype=int)
ident[nc:] = 1
return y, exog_fe, exog_vc, ident
def gen_crossed_logit_pandas(nc, cs, s1, s2):
np.random.seed(3799)
a = np.kron(np.arange(nc), np.ones(cs))
b = np.kron(np.ones(cs), np.arange(nc))
fe = np.ones(nc * cs)
vc = np.zeros(nc * cs)
for i in np.unique(a):
ii = np.flatnonzero(a == i)
vc[ii] += s1 * np.random.normal()
for i in np.unique(b):
ii = np.flatnonzero(b == i)
vc[ii] += s2 * np.random.normal()
lp = -0.5 * fe + vc
pr = 1 / (1 + np.exp(-lp))
y = 1 * (np.random.uniform(size=nc * cs) < pr)
ident = np.zeros(2 * nc, dtype=int)
ident[nc:] = 1
df = pd.DataFrame({"fe": fe, "a": a, "b": b, "y": y})
return df
def test_simple_logit_map():
y, exog_fe, exog_vc, ident = gen_simple_logit(10, 10, 2)
exog_vc = sparse.csr_matrix(exog_vc)
glmm = BinomialBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt = glmm.fit_map()
assert_allclose(
glmm.logposterior_grad(rslt.params),
np.zeros_like(rslt.params),
atol=1e-3)
# Test the predict method
for linear in False, True:
for exog in None, exog_fe:
pr1 = rslt.predict(linear=linear, exog=exog)
pr2 = glmm.predict(rslt.params, linear=linear, exog=exog)
assert_allclose(pr1, pr2)
if not linear:
assert_equal(pr1.min() >= 0, True)
assert_equal(pr1.max() <= 1, True)
def test_simple_poisson_map():
y, exog_fe, exog_vc, ident = gen_simple_poisson(10, 10, 0.2)
exog_vc = sparse.csr_matrix(exog_vc)
glmm1 = PoissonBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt1 = glmm1.fit_map()
assert_allclose(
glmm1.logposterior_grad(rslt1.params),
np.zeros_like(rslt1.params),
atol=1e-3)
# This should give the same answer as above
glmm2 = PoissonBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt2 = glmm2.fit_map()
assert_allclose(rslt1.params, rslt2.params, atol=1e-4)
# Test the predict method
for linear in False, True:
for exog in None, exog_fe:
pr1 = rslt1.predict(linear=linear, exog=exog)
pr2 = rslt2.predict(linear=linear, exog=exog)
pr3 = glmm1.predict(rslt1.params, linear=linear, exog=exog)
pr4 = glmm2.predict(rslt2.params, linear=linear, exog=exog)
assert_allclose(pr1, pr2, rtol=1e-5)
assert_allclose(pr2, pr3, rtol=1e-5)
assert_allclose(pr3, pr4, rtol=1e-5)
if not linear:
assert_equal(pr1.min() >= 0, True)
assert_equal(pr2.min() >= 0, True)
assert_equal(pr3.min() >= 0, True)
# Check dimensions and PSD status of cov_params
for rslt in rslt1, rslt2:
cp = rslt.cov_params()
p = len(rslt.params)
assert_equal(cp.shape, np.r_[p, p])
np.linalg.cholesky(cp)
def test_crossed_logit_map():
y, exog_fe, exog_vc, ident = gen_crossed_logit(10, 10, 1, 2)
exog_vc = sparse.csr_matrix(exog_vc)
glmm = BinomialBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt = glmm.fit_map()
assert_allclose(
glmm.logposterior_grad(rslt.params),
np.zeros_like(rslt.params),
atol=1e-4)
# Check dimensions and PSD status of cov_params
cp = rslt.cov_params()
p = len(rslt.params)
assert_equal(cp.shape, np.r_[p, p])
np.linalg.cholesky(cp)
def test_crossed_poisson_map():
y, exog_fe, exog_vc, ident = gen_crossed_poisson(10, 10, 1, 1)
exog_vc = sparse.csr_matrix(exog_vc)
glmm = PoissonBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt = glmm.fit_map()
assert_allclose(
glmm.logposterior_grad(rslt.params),
np.zeros_like(rslt.params),
atol=1e-4)
# Check dimensions and PSD status of cov_params
cp = rslt.cov_params()
p = len(rslt.params)
assert_equal(cp.shape, np.r_[p, p])
np.linalg.cholesky(cp)
def test_logit_map_crossed_formula():
data = gen_crossed_logit_pandas(10, 10, 1, 0.5)
fml = "y ~ fe"
fml_vc = {"a": "0 + C(a)", "b": "0 + C(b)"}
glmm = BinomialBayesMixedGLM.from_formula(fml, fml_vc, data, vcp_p=0.5)
rslt = glmm.fit_map()
assert_allclose(
glmm.logposterior_grad(rslt.params),
np.zeros_like(rslt.params),
atol=1e-4)
rslt.summary()
r = rslt.random_effects("a")
assert_allclose(
r.iloc[0, :].values, np.r_[-0.02004904, 0.094014], atol=1e-4)
# Check dimensions and PSD status of cov_params
cm = rslt.cov_params()
p = rslt.params.shape[0]
assert_equal(list(cm.shape), [p, p])
np.linalg.cholesky(cm)
def test_elbo_grad():
for f in range(2):
for j in range(2):
if f == 0:
if j == 0:
y, exog_fe, exog_vc, ident = gen_simple_logit(10, 10, 2)
else:
y, exog_fe, exog_vc, ident = gen_crossed_logit(
10, 10, 1, 2)
elif f == 1:
if j == 0:
y, exog_fe, exog_vc, ident = gen_simple_poisson(
10, 10, 0.5)
else:
y, exog_fe, exog_vc, ident = gen_crossed_poisson(
10, 10, 1, 0.5)
exog_vc = sparse.csr_matrix(exog_vc)
if f == 0:
glmm1 = BinomialBayesMixedGLM(
y, exog_fe, exog_vc, ident, vcp_p=0.5)
else:
glmm1 = PoissonBayesMixedGLM(
y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt1 = glmm1.fit_map()
for k in range(3):
if k == 0:
vb_mean = rslt1.params
vb_sd = np.ones_like(vb_mean)
elif k == 1:
vb_mean = np.zeros(len(vb_mean))
vb_sd = np.ones_like(vb_mean)
else:
vb_mean = np.random.normal(size=len(vb_mean))
vb_sd = np.random.uniform(1, 2, size=len(vb_mean))
mean_grad, sd_grad = glmm1.vb_elbo_grad(vb_mean, vb_sd)
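# Compare the analytic ELBO gradients from vb_elbo_grad against a
# finite-difference approximation of vb_elbo over the stacked (mean, sd) vector.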
def elbo(vec):
n = len(vec) // 2
return glmm1.vb_elbo(vec[:n], vec[n:])
x = np.concatenate((vb_mean, vb_sd))
g1 = approx_fprime(x, elbo, 1e-5)
n = len(x) // 2
mean_grad_n = g1[:n]
sd_grad_n = g1[n:]
assert_allclose(mean_grad, mean_grad_n, atol=1e-2, rtol=1e-2)
assert_allclose(sd_grad, sd_grad_n, atol=1e-2, rtol=1e-2)
def test_simple_logit_vb():
y, exog_fe, exog_vc, ident = gen_simple_logit(10, 10, 0)
exog_vc = sparse.csr_matrix(exog_vc)
glmm1 = BinomialBayesMixedGLM(
y, exog_fe, exog_vc, ident, vcp_p=0.5, fe_p=0.5)
rslt1 = glmm1.fit_map()
glmm2 = BinomialBayesMixedGLM(
y, exog_fe, exog_vc, ident, vcp_p=0.5, fe_p=0.5)
rslt2 = glmm2.fit_vb(rslt1.params)
rslt1.summary()
rslt2.summary()
assert_allclose(
rslt1.params[0:5],
np.r_[0.75330405, -0.71643228, -2.49091288, -0.00959806, 0.00450254],
rtol=1e-4,
atol=1e-4)
assert_allclose(
rslt2.params[0:5],
np.r_[0.79338836, -0.7599833, -0.64149356, -0.24772884, 0.10775366],
rtol=1e-4,
atol=1e-4)
for rslt in rslt1, rslt2:
cp = rslt.cov_params()
p = len(rslt.params)
if rslt is rslt1:
assert_equal(cp.shape, np.r_[p, p])
np.linalg.cholesky(cp)
else:
assert_equal(cp.shape, np.r_[p,])
assert_equal(cp > 0, True*np.ones(p))
def test_simple_poisson_vb():
y, exog_fe, exog_vc, ident = gen_simple_poisson(10, 10, 1)
exog_vc = sparse.csr_matrix(exog_vc)
glmm1 = PoissonBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt1 = glmm1.fit_map()
glmm2 = PoissonBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt2 = glmm2.fit_vb(rslt1.params)
rslt1.summary()
rslt2.summary()
assert_allclose(
rslt1.params[0:5],
np.r_[-0.07233493, -0.06706505, -0.47159649, 1.12575122, -1.02442201],
rtol=1e-4,
atol=1e-4)
assert_allclose(
rslt1.cov_params().flat[0:5],
np.r_[0.00790914, 0.00080666, -0.00050719, 0.00022648, 0.00046235],
rtol=1e-4,
atol=1e-4)
assert_allclose(
rslt2.params[0:5],
np.r_[-0.07088814, -0.06373107, -0.22770786, 1.12923746, -1.26161339],
rtol=1e-4,
atol=1e-4)
assert_allclose(
rslt2.cov_params()[0:5],
np.r_[0.00747782, 0.0092554, 0.04508904, 0.02934488, 0.20312746],
rtol=1e-4,
atol=1e-4)
for rslt in rslt1, rslt2:
cp = rslt.cov_params()
p = len(rslt.params)
if rslt is rslt1:
assert_equal(cp.shape, np.r_[p, p])
np.linalg.cholesky(cp)
else:
assert_equal(cp.shape, np.r_[p,])
assert_equal(cp > 0, True*np.ones(p))
def test_crossed_logit_vb():
y, exog_fe, exog_vc, ident = gen_crossed_logit(10, 10, 1, 2)
glmm1 = BinomialBayesMixedGLM(
y, exog_fe, exog_vc, ident, vcp_p=0.5, fe_p=0.5)
rslt1 = glmm1.fit_map()
glmm2 = BinomialBayesMixedGLM(
y, exog_fe, exog_vc, ident, vcp_p=0.5, fe_p=0.5)
rslt2 = glmm2.fit_vb(mean=rslt1.params)
rslt1.summary()
rslt2.summary()
assert_allclose(
rslt1.params[0:5],
np.r_[-5.43073978e-01, -2.46197518e+00, -2.36582801e+00,
-9.64030461e-03, 2.32701078e-03],
rtol=1e-4,
atol=1e-4)
assert_allclose(
rslt1.cov_params().flat[0:5],
np.r_[4.12927123e-02, -2.04448923e-04, 4.64829219e-05, 1.20377543e-04,
-1.45003234e-04],
rtol=1e-4,
atol=1e-4)
assert_allclose(
rslt2.params[0:5],
np.r_[-0.70834417, -0.3571011, 0.19126823, -0.36074489, 0.058976],
rtol=1e-4,
atol=1e-4)
assert_allclose(
rslt2.cov_params()[0:5],
np.r_[0.05212492, 0.04729656, 0.03916944, 0.25921842, 0.25782576],
rtol=1e-4,
atol=1e-4)
for rslt in rslt1, rslt2:
cp = rslt.cov_params()
p = len(rslt.params)
if rslt is rslt1:
assert_equal(cp.shape, np.r_[p, p])
np.linalg.cholesky(cp)
else:
assert_equal(cp.shape, np.r_[p,])
assert_equal(cp > 0, True*np.ones(p))
def test_crossed_logit_vb_formula():
data = gen_crossed_logit_pandas(10, 10, 1, 2)
fml = "y ~ fe"
fml_vc = {"a": "0 + C(a)", "b": "0 + C(b)"}
glmm1 = BinomialBayesMixedGLM.from_formula(fml, fml_vc, data, vcp_p=0.5)
rslt1 = glmm1.fit_vb()
glmm2 = BinomialBayesMixedGLM(
glmm1.endog, glmm1.exog, glmm1.exog_vc, glmm1.ident, vcp_p=0.5)
rslt2 = glmm2.fit_vb()
assert_allclose(rslt1.params, rslt2.params, atol=1e-4)
rslt1.summary()
rslt2.summary()
for rslt in rslt1, rslt2:
cp = rslt.cov_params()
p = len(rslt.params)
if rslt is rslt1:
assert_equal(cp.shape, np.r_[p,])
assert_equal(cp > 0, True*np.ones(p))
else:
assert_equal(cp.shape, np.r_[p,])
assert_equal(cp > 0, True*np.ones(p))
def test_crossed_poisson_vb():
y, exog_fe, exog_vc, ident = gen_crossed_poisson(10, 10, 1, 0.5)
glmm1 = PoissonBayesMixedGLM(
y, exog_fe, exog_vc, ident, vcp_p=0.5, fe_p=0.5)
rslt1 = glmm1.fit_map()
glmm2 = PoissonBayesMixedGLM(
y, exog_fe, exog_vc, ident, vcp_p=0.5, fe_p=0.5)
rslt2 = glmm2.fit_vb(mean=rslt1.params)
rslt1.summary()
rslt2.summary()
assert_allclose(
rslt1.params[0:5],
np.r_[-0.54855281, 0.10458834, -0.68777741, -0.01699925, 0.77200546],
rtol=1e-4,
atol=1e-4)
assert_allclose(
rslt2.params[0:5],
np.r_[-0.54691502, 0.22297158, -0.52673802, -0.06218684, 0.74385237],
rtol=1e-4,
atol=1e-4)
for rslt in rslt1, rslt2:
cp = rslt.cov_params()
p = len(rslt.params)
if rslt is rslt1:
assert_equal(cp.shape, np.r_[p, p])
np.linalg.cholesky(cp)
else:
assert_equal(cp.shape, np.r_[p,])
assert_equal(cp > 0, True*np.ones(p))
def test_poisson_formula():
y, exog_fe, exog_vc, ident = gen_crossed_poisson(10, 10, 1, 0.5)
for vb in False, True:
glmm1 = PoissonBayesMixedGLM(
y, exog_fe, exog_vc, ident)
if vb:
rslt1 = glmm1.fit_vb()
else:
rslt1 = glmm1.fit_map()
# Build categorical variables that match exog_vc
df = pd.DataFrame({"y": y, "x1": exog_fe[:, 0]})
z1 = np.zeros(len(y))
for j,k in enumerate(np.flatnonzero(ident == 0)):
z1[exog_vc[:, k] == 1] = j
df["z1"] = z1
z2 = np.zeros(len(y))
for j,k in enumerate(np.flatnonzero(ident == 1)):
z2[exog_vc[:, k] == 1] = j
df["z2"] = z2
fml = "y ~ 0 + x1"
vc_fml = {}
vc_fml["z1"] = "0 + C(z1)"
vc_fml["z2"] = "0 + C(z2)"
glmm2 = PoissonBayesMixedGLM.from_formula(fml, vc_fml, df)
if vb:
rslt2 = glmm2.fit_vb()
else:
rslt2 = glmm2.fit_map()
assert_allclose(rslt1.params, rslt2.params, rtol=1e-5)
for rslt in rslt1, rslt2:
cp = rslt.cov_params()
p = len(rslt.params)
if vb:
assert_equal(cp.shape, np.r_[p,])
assert_equal(cp > 0, True*np.ones(p))
else:
assert_equal(cp.shape, np.r_[p, p])
np.linalg.cholesky(cp)
def test_scale_vb():
y, exog_fe, exog_vc, ident = gen_simple_logit(10, 10, 0)
exog_fe -= exog_fe.mean(0)
exog_fe /= exog_fe.std(0)
exog_vc = sparse.csr_matrix(exog_vc)
rslts = []
for scale_fe in False, True:
glmm = BinomialBayesMixedGLM(
y, exog_fe, exog_vc, ident, vcp_p=0.5, fe_p=0.5)
rslt = glmm.fit_vb(scale_fe=scale_fe)
rslts.append(rslt)
assert_allclose(rslts[0].params, rslts[1].params, rtol=1e-4)
def test_scale_map():
y, exog_fe, exog_vc, ident = gen_simple_logit(10, 10, 0)
exog_fe -= exog_fe.mean(0)
exog_fe /= exog_fe.std(0)
exog_vc = sparse.csr_matrix(exog_vc)
rslts = []
for scale_fe in False, True:
glmm = BinomialBayesMixedGLM(
y, exog_fe, exog_vc, ident, vcp_p=0.5, fe_p=0.5)
rslt = glmm.fit_map(scale_fe=scale_fe)
rslts.append(rslt)
assert_allclose(rslts[0].params, rslts[1].params, rtol=1e-4)
def test_doc_examples():
np.random.seed(8767)
n = 200
m = 20
data = pd.DataFrame({"Year": np.random.uniform(0, 1, n),
"Village": np.random.randint(0, m, n)})
data['year_cen'] = data['Year'] - data.Year.mean()
# Binomial outcome
lpr = np.random.normal(size=m)[data.Village]
lpr += np.random.normal(size=m)[data.Village] * data.year_cen
y = (np.random.uniform(size=n) < 1 / (1 + np.exp(-lpr)))
data["y"] = y.astype(int)
# These lines should agree with the example in the class docstring.
random = {"a": '0 + C(Village)', "b": '0 + C(Village)*year_cen'}
model = BinomialBayesMixedGLM.from_formula(
'y ~ year_cen', random, data)
result = model.fit_vb()
_ = result
# Poisson outcome
lpr = np.random.normal(size=m)[data.Village]
lpr += np.random.normal(size=m)[data.Village] * data.year_cen
data["y"] = np.random.poisson(np.exp(lpr))
# These lines should agree with the example in the class docstring.
random = {"a": '0 + C(Village)', "b": '0 + C(Village)*year_cen'}
model = PoissonBayesMixedGLM.from_formula(
'y ~ year_cen', random, data)
result = model.fit_vb()
_ = result
|
bsd-3-clause
|
kvmu/SFU-workterm
|
factory_analysis.py
|
1
|
10770
|
import ROOT
import numpy as np
import root_numpy as rootnp
import kernel
import vegas as VEGAS
import os
import re
# import matplotlib
# matplotlib.use('Agg')
# import matplotlib.pyplot as plt
from timer import timer
# Library used to load the data in this script
ROOT.gSystem.Load('libExRootAnalysis.so') # Path not specified because Daniel has automatically taken care of this in "global" profile.
nexternal = 8 # Number of external lines
tlvdim = 4 # Number of dimensions in a 4-vector
ndim = nexternal*tlvdim # Number of dimensions in 1 truth event
###############################################################################
# Set script parameters #
###############################################################################
nevts = 1000
gpu = "cuda" # opencl or cuda
dof = 4 # Degrees of freedom (do not know incoming partons [able to calculate] and neutrinos)
neval = 8192*2 # Number of function evaluations for integration
nitn = 5 # Number of iterations of the VEGAS algorithm (integration)
# want_b_veto = False # Do you want to include b-veto
amtOfInfoPerEvent = 16 # The number of indices in signal_event_data and kernel_eventData array per event ( 2l + 2j = 2*4 + 2*4 = 16)
intDomain = 1000
kernelList = ['vbf125', 'ttbar']
for kernelMEM in kernelList:
basedir = os.getcwd()
if kernelMEM != '':
os.chdir(kernelMEM)
###############################################################################
# Pull background and signal #
###############################################################################
file_path = '/home/kvmu/vbf/vbf_ttbar_run1_tupleData'
fileList = sorted(os.listdir(file_path))
for file in fileList:
process_name = file.split('.')[0]
print "\n########## " + process_name + " is being analysed through "+ kernelMEM + " ME ##########"
fh = ROOT.TFile(file_path +'/'+ file, 'read') # Read the background file
inTree = fh.Get('Output') # Obtain the tree from file
# Not using any cuts
# if want_b_veto:
# cuts = '((nbt20==0))*(lepID0*lepID1<0)*((l0pt>22) || (l1pt>22))*(nj>=2)*(cjv_leadPt<20.)*(olv==1)*(mtt_TrackHWW_Clj < 66.1876)*(ne==1)*(nm==1)*(mll>10)'
# else:
# cuts = '(lepID0*lepID1<0)*((l0pt>22) || (l1pt>22))*(nj>=2)*(cjv_leadPt<20.)*(olv==1)*(mtt_TrackHWW_Clj < 66.1876)*(ne==1)*(nm==1)*(mll>10)'
# Create variable list to extract from tree and apply proper conversion from (pt, eta, phi) coordinate system to (E, px, py, pz)
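# For (approximately) massless objects this amounts to:
#   E = pt*cosh(eta), px = pt*cos(phi), py = pt*sin(phi), pz = pt*sinh(eta)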
varlist = [\
# Energy for lepton 0 (puts only positive charge leptons here) / Assume mass negligible
'sqrt( (lepID0 < 0)*(l0pt*cosh(lepEta0))**2 + (lepID1 < 0)*(l1pt*cosh(lepEta1))**2 )', \
# P_x for lepton 0
'(lepID0 < 0)*l0pt*cos(lepPhi0) + (lepID1 < 0)*l1pt*cos(lepPhi1) ',\
# P_y for lepton 0
'(lepID0 < 0)*l0pt*sin(lepPhi0) + (lepID1 < 0)*l1pt*sin(lepPhi1)',\
# P_z for lepton 0
'(lepID0 < 0)*l0pt*sinh(lepEta0) + (lepID1 < 0)*l1pt*sinh(lepEta1)',\
# Energy for lepton 1 (puts only negative charge leptons here) / Assume mass negligible
'sqrt( (lepID0 > 0)*(l0pt*cosh(lepEta0))**2 + (lepID1 > 0)*(l1pt*cosh(lepEta1))**2 )', \
# P_x for lepton 1
'(lepID0 > 0)*l0pt*cos(lepPhi0) + (lepID1 > 0)*l1pt*cos(lepPhi1) ',\
# P_y for lepton 1
'(lepID0 > 0)*l0pt*sin(lepPhi0) + (lepID1 > 0)*l1pt*sin(lepPhi1)',\
# P_z for lepton 1
'(lepID0 > 0)*l0pt*sinh(lepEta0) + (lepID1 > 0)*l1pt*sinh(lepEta1)',\
# Energy for jet 0
'jetE0',\
# P_x for jet 0
'jetPt0*cos(jetPhi0)',\
# P_y for jet 0
'jetPt0*sin(jetPhi0)',\
# P_z for jet 0
'jetPt0*sinh(jetEta0)',\
# Energy for jet 1
'jetE1',\
# P_x for jet 1
'jetPt1*cos(jetPhi1)',\
# P_y for jet 1
'jetPt1*sin(jetPhi1)',\
# P_z for jet 1
'jetPt1*sinh(jetEta1)',\
# Event weight no bveto
'w/MV120_85_EventWeight',\
# Event weight with bveto
'w',\
]
# Pull data from tree
dataRecord = rootnp.tree2rec(inTree, branches = varlist)#, selection = cuts)
fh.Close() # Close background input file
# Give names to data taken from dataRecord
dataRecord.dtype.names = ('lepE0','lepPx0','lepPy0','lepPz0',\
'lepE1','lepPx1','lepPy1','lepPz1',\
'jetE0','jetPx0','jetPy0','jetPz0',\
'jetE1','jetPx1','jetPy1','jetPz1',\
'w_div_MV120_85_EventWeight','w',\
)
nevts = min(nevts, dataRecord.size)
print "Number of "+ process_name +" Events: {0}".format(nevts)
kernel_eventData = np.zeros((nevts, amtOfInfoPerEvent))
# Populate the kernel_eventData matrix, use numpy vector assignment techniques (faster than for loop)
kernel_eventData[:, 0*tlvdim + 0] = dataRecord.lepE0[0:nevts]
kernel_eventData[:, 0*tlvdim + 1] = dataRecord.lepPx0[0:nevts]
kernel_eventData[:, 0*tlvdim + 2] = dataRecord.lepPy0[0:nevts]
kernel_eventData[:, 0*tlvdim + 3] = dataRecord.lepPz0[0:nevts]
kernel_eventData[:, 1*tlvdim + 0] = dataRecord.lepE1[0:nevts]
kernel_eventData[:, 1*tlvdim + 1] = dataRecord.lepPx1[0:nevts]
kernel_eventData[:, 1*tlvdim + 2] = dataRecord.lepPy1[0:nevts]
kernel_eventData[:, 1*tlvdim + 3] = dataRecord.lepPz1[0:nevts]
kernel_eventData[:, 2*tlvdim + 0] = dataRecord.jetE0[0:nevts]
kernel_eventData[:, 2*tlvdim + 1] = dataRecord.jetPx0[0:nevts]
kernel_eventData[:, 2*tlvdim + 2] = dataRecord.jetPy0[0:nevts]
kernel_eventData[:, 2*tlvdim + 3] = dataRecord.jetPz0[0:nevts]
kernel_eventData[:, 3*tlvdim + 0] = dataRecord.jetE1[0:nevts]
kernel_eventData[:, 3*tlvdim + 1] = dataRecord.jetPx1[0:nevts]
kernel_eventData[:, 3*tlvdim + 2] = dataRecord.jetPy1[0:nevts]
kernel_eventData[:, 3*tlvdim + 3] = dataRecord.jetPz1[0:nevts]
kernel_eventData = kernel_eventData.flatten()
print '########## Data Retrieval Complete ##########'
print '.............................................'
print '############# Initializing MEM ##############'
#************************** User defined function ****************************#
def setparams():
# Read in the parameters of the model using SLHAReader
card_reader = ROOT.SLHAReader("cards/params.dat")
pobj = ROOT.parameters_sm()
pobj.set_independent_parameters(card_reader)
pobj.set_independent_couplings();
pobj.set_dependent_parameters();
pobj.set_dependent_couplings();
return pobj
#*****************************************************************************#
###############################################################################
# Process Matrix Elements through kernel #
###############################################################################
# Load shared library of kernel src/ files to ROOT
ROOT.gROOT.SetBatch(0)
ROOT.gSystem.Load('lib/libpdf.so')
ROOT.gSystem.Load("lib/libme.so")
# Set the parameters of the model using the parameter card(s)
pobj = setparams()
# Create kernel object
MEObj = kernel.kernel(nexternal-dof, amtOfInfoPerEvent, pobj, mode = gpu, pdfsetn = 'CT10', kernelfn = "kernel/kernel.cl", pR = neval)
# Initialize results arrays
resultsVector = np.zeros(nevts, dtype = kernel.a_float_t)
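# Thin wrapper exposing the matrix-element kernel as a vegas.BatchIntegrand,
# so VEGAS can evaluate a whole batch of phase-space points per call.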
class batch_func(VEGAS.BatchIntegrand):
def __init__(self, ME_kernel_object = None):
self.ME_kernel_object = ME_kernel_object
def __call__(self, neutrino_space):
eval = self.ME_kernel_object.eval(xp = neutrino_space)
return eval
# Set the kinematic values for each event in the kernel object in order to evaluate the ME.
write_path = "/home/kvmu/vbf/vbf_plotting"
kern_output = "/z_"+process_name+"data_"+kernelMEM+"mem.txt"
kernOut = open(write_path + kern_output, 'w+')
integral = VEGAS.Integrator([[-intDomain, intDomain]]*4)
v = batch_func(ME_kernel_object = MEObj)
timeList = []
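# Per-event MEM integration: load the event kinematics into the kernel, let VEGAS
# adapt its grid, then integrate over the neutrino phase space; retry up to 5 times
# until the fit probability Q exceeds 0.2, writing the last result either way.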
for ievt in range(nevts):
iteration = 0
Qfound = False
MEObj.set_momenta((kernel_eventData[ievt*amtOfInfoPerEvent:(ievt+1)*amtOfInfoPerEvent]).astype(kernel.a_float_t))
with timer() as localTime:
while(iteration < 5):
integral(v, nitn = 3, neval = neval, nhcube_batch = neval) # Adapt the grid before saving results
resultant = integral(v, nitn = nitn, neval = neval, nhcube_batch = neval)
if resultant.Q > 0.2:
kernOut.write(str(resultant.mean) + "\t" + str(resultant.sdev) + "\t" + str(resultant.chi2) + "\t" + str(resultant.dof) + "\t" + str(resultant.Q) + "\n")
Qfound = True
break
else:
iteration+=1
if not Qfound:
kernOut.write(str(resultant.mean) + "\t" + str(resultant.sdev) + "\t" + str(resultant.chi2) + "\t" + str(resultant.dof) + "\t" + str(resultant.Q) + "\n")
timeList.append(localTime.secs)
kernOut.close()
totalRunTime = sum(timeList)
timePerEvent = totalRunTime/nevts
paramOutput = '/zz_run_parameters_'+process_name+'_'+kernelMEM+"mem.txt"
paramOut = open(write_path+paramOutput, 'w+')
paramOut.write('Number of events: \t %i \n' %(nevts))
paramOut.write('Number of phase space points: \t %i \n' %neval)
paramOut.write('Number of iterations: \t %i \n' %nitn)
paramOut.write("Total run time: \t %.2f s" % totalRunTime)
paramOut.close()
print "Total run time: \t %.2f s" % totalRunTime
print "Average time per event: \t %.2f s" % timePerEvent
print 'Number of events: \t %i' %(nevts)
print 'Number of phase space points: \t %i' %neval
print 'Number of iterations: \t %i' %nitn
os.chdir(basedir)
|
mit
|
georgijd/influxdb-python
|
influxdb/tests/dataframe_client_test.py
|
2
|
13437
|
# -*- coding: utf-8 -*-
"""
unit tests for the DataFrame client
"""
from .client_test import _mocked_session
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
import json
import requests_mock
from nose.tools import raises
from datetime import timedelta
from influxdb.tests import skipIfPYpy, using_pypy
import warnings
if not using_pypy:
import pandas as pd
from pandas.util.testing import assert_frame_equal
from influxdb import DataFrameClient
@skipIfPYpy
class TestDataFrameClient(unittest.TestCase):
def setUp(self):
# By default, raise exceptions on warnings
warnings.simplefilter('error', FutureWarning)
def test_write_points_from_dataframe(self):
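# The client is expected to serialize the frame to InfluxDB line protocol,
# i.e. <measurement>[,<tag>=<value>...] <field>=<value>[,...] <timestamp in ns>,
# which is what the `expected` byte string below encodes.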
now = pd.Timestamp('1970-01-01 00:00+00:00')
dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
index=[now, now + timedelta(hours=1)],
columns=["column_one", "column_two",
"column_three"])
expected = (
b"foo column_one=\"1\",column_three=1.0,column_two=1i 0\n"
b"foo column_one=\"2\",column_three=2.0,column_two=2i "
b"3600000000000\n"
)
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/write",
status_code=204)
cli = DataFrameClient(database='db')
cli.write_points(dataframe, 'foo')
self.assertEqual(m.last_request.body, expected)
cli.write_points(dataframe, 'foo', tags=None)
self.assertEqual(m.last_request.body, expected)
def test_write_points_from_dataframe_in_batches(self):
now = pd.Timestamp('1970-01-01 00:00+00:00')
dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
index=[now, now + timedelta(hours=1)],
columns=["column_one", "column_two",
"column_three"])
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/write",
status_code=204)
cli = DataFrameClient(database='db')
self.assertTrue(cli.write_points(dataframe, "foo", batch_size=1))
def test_write_points_from_dataframe_with_numeric_column_names(self):
now = pd.Timestamp('1970-01-01 00:00+00:00')
# df with numeric column names
dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
index=[now, now + timedelta(hours=1)])
expected = (
b'foo,hello=there 0=\"1\",1=1i,2=1.0 0\n'
b'foo,hello=there 0=\"2\",1=2i,2=2.0 3600000000000\n'
)
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/write",
status_code=204)
cli = DataFrameClient(database='db')
cli.write_points(dataframe, "foo", {"hello": "there"})
self.assertEqual(m.last_request.body, expected)
def test_write_points_from_dataframe_with_period_index(self):
dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
index=[pd.Period('1970-01-01'),
pd.Period('1970-01-02')],
columns=["column_one", "column_two",
"column_three"])
expected = (
b"foo column_one=\"1\",column_three=1.0,column_two=1i 0\n"
b"foo column_one=\"2\",column_three=2.0,column_two=2i "
b"86400000000000\n"
)
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/write",
status_code=204)
cli = DataFrameClient(database='db')
cli.write_points(dataframe, "foo")
self.assertEqual(m.last_request.body, expected)
def test_write_points_from_dataframe_with_time_precision(self):
now = pd.Timestamp('1970-01-01 00:00+00:00')
dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
index=[now, now + timedelta(hours=1)],
columns=["column_one", "column_two",
"column_three"])
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/write",
status_code=204)
cli = DataFrameClient(database='db')
measurement = "foo"
cli.write_points(dataframe, measurement, time_precision='h')
self.assertEqual(m.last_request.qs['precision'], ['h'])
self.assertEqual(
b'foo column_one="1",column_three=1.0,column_two=1i 0\nfoo '
b'column_one="2",column_three=2.0,column_two=2i 1\n',
m.last_request.body,
)
cli.write_points(dataframe, measurement, time_precision='m')
self.assertEqual(m.last_request.qs['precision'], ['m'])
self.assertEqual(
b'foo column_one="1",column_three=1.0,column_two=1i 0\nfoo '
b'column_one="2",column_three=2.0,column_two=2i 60\n',
m.last_request.body,
)
cli.write_points(dataframe, measurement, time_precision='s')
self.assertEqual(m.last_request.qs['precision'], ['s'])
self.assertEqual(
b'foo column_one="1",column_three=1.0,column_two=1i 0\nfoo '
b'column_one="2",column_three=2.0,column_two=2i 3600\n',
m.last_request.body,
)
cli.write_points(dataframe, measurement, time_precision='ms')
self.assertEqual(m.last_request.qs['precision'], ['ms'])
self.assertEqual(
b'foo column_one="1",column_three=1.0,column_two=1i 0\nfoo '
b'column_one="2",column_three=2.0,column_two=2i 3600000\n',
m.last_request.body,
)
cli.write_points(dataframe, measurement, time_precision='u')
self.assertEqual(m.last_request.qs['precision'], ['u'])
self.assertEqual(
b'foo column_one="1",column_three=1.0,column_two=1i 0\nfoo '
b'column_one="2",column_three=2.0,column_two=2i 3600000000\n',
m.last_request.body,
)
cli.write_points(dataframe, measurement, time_precision='n')
self.assertEqual(m.last_request.qs['precision'], ['n'])
self.assertEqual(
b'foo column_one="1",column_three=1.0,column_two=1i 0\n'
b'foo column_one="2",column_three=2.0,column_two=2i '
b'3600000000000\n',
m.last_request.body,
)
@raises(TypeError)
def test_write_points_from_dataframe_fails_without_time_index(self):
dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
columns=["column_one", "column_two",
"column_three"])
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/db/db/series",
status_code=204)
cli = DataFrameClient(database='db')
cli.write_points(dataframe, "foo")
@raises(TypeError)
def test_write_points_from_dataframe_fails_with_series(self):
now = pd.Timestamp('1970-01-01 00:00+00:00')
dataframe = pd.Series(data=[1.0, 2.0],
index=[now, now + timedelta(hours=1)])
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/db/db/series",
status_code=204)
cli = DataFrameClient(database='db')
cli.write_points(dataframe, "foo")
def test_query_into_dataframe(self):
data = {
"results": [{
"series": [
{"measurement": "network",
"tags": {"direction": ""},
"columns": ["time", "value"],
"values":[["2009-11-10T23:00:00Z", 23422]]
},
{"measurement": "network",
"tags": {"direction": "in"},
"columns": ["time", "value"],
"values": [["2009-11-10T23:00:00Z", 23422],
["2009-11-10T23:00:00Z", 23422],
["2009-11-10T23:00:00Z", 23422]]
}
]
}]
}
pd1 = pd.DataFrame(
[[23422]], columns=['value'],
index=pd.to_datetime(["2009-11-10T23:00:00Z"]))
pd1.index = pd1.index.tz_localize('UTC')
pd2 = pd.DataFrame(
[[23422], [23422], [23422]], columns=['value'],
index=pd.to_datetime(["2009-11-10T23:00:00Z",
"2009-11-10T23:00:00Z",
"2009-11-10T23:00:00Z"]))
pd2.index = pd2.index.tz_localize('UTC')
expected = {
('network', (('direction', ''),)): pd1,
('network', (('direction', 'in'),)): pd2
}
cli = DataFrameClient('host', 8086, 'username', 'password', 'db')
with _mocked_session(cli, 'GET', 200, data):
result = cli.query('select value from network group by direction;')
for k in expected:
assert_frame_equal(expected[k], result[k])
def test_query_with_empty_result(self):
cli = DataFrameClient('host', 8086, 'username', 'password', 'db')
with _mocked_session(cli, 'GET', 200, {"results": [{}]}):
result = cli.query('select column_one from foo;')
self.assertEqual(result, {})
def test_list_series(self):
response = {
'results': [
{'series': [
{
'columns': ['host'],
'measurement': 'cpu',
'values': [
['server01']]
},
{
'columns': [
'host',
'region'
],
'measurement': 'network',
'values': [
[
'server01',
'us-west'
],
[
'server01',
'us-east'
]
]
}
]}
]
}
expected = {
'cpu': pd.DataFrame([['server01']], columns=['host']),
'network': pd.DataFrame(
[['server01', 'us-west'], ['server01', 'us-east']],
columns=['host', 'region'])}
cli = DataFrameClient('host', 8086, 'username', 'password', 'db')
with _mocked_session(cli, 'GET', 200, response):
series = cli.get_list_series()
assert_frame_equal(series['cpu'], expected['cpu'])
assert_frame_equal(series['network'], expected['network'])
def test_get_list_database(self):
data = {'results': [
{'series': [
{'measurement': 'databases',
'values': [
['new_db_1'],
['new_db_2']],
'columns': ['name']}]}
]}
cli = DataFrameClient('host', 8086, 'username', 'password', 'db')
with _mocked_session(cli, 'get', 200, json.dumps(data)):
self.assertListEqual(
cli.get_list_database(),
[{'name': 'new_db_1'}, {'name': 'new_db_2'}]
)
def test_datetime_to_epoch(self):
timestamp = pd.Timestamp('2013-01-01 00:00:00.000+00:00')
cli = DataFrameClient('host', 8086, 'username', 'password', 'db')
self.assertEqual(
cli._datetime_to_epoch(timestamp),
1356998400.0
)
self.assertEqual(
cli._datetime_to_epoch(timestamp, time_precision='h'),
1356998400.0 / 3600
)
self.assertEqual(
cli._datetime_to_epoch(timestamp, time_precision='m'),
1356998400.0 / 60
)
self.assertEqual(
cli._datetime_to_epoch(timestamp, time_precision='s'),
1356998400.0
)
self.assertEqual(
cli._datetime_to_epoch(timestamp, time_precision='ms'),
1356998400000.0
)
self.assertEqual(
cli._datetime_to_epoch(timestamp, time_precision='u'),
1356998400000000.0
)
self.assertEqual(
cli._datetime_to_epoch(timestamp, time_precision='n'),
1356998400000000000.0
)
|
mit
|
MatthieuBizien/scikit-learn
|
examples/plot_kernel_ridge_regression.py
|
39
|
6259
|
"""
=============================================
Comparison of kernel ridge regression and SVR
=============================================
Both kernel ridge regression (KRR) and SVR learn a non-linear function by
employing the kernel trick, i.e., they learn a linear function in the space
induced by the respective kernel which corresponds to a non-linear function in
the original space. They differ in the loss functions (ridge versus
epsilon-insensitive loss). In contrast to SVR, fitting a KRR can be done in
closed-form and is typically faster for medium-sized datasets. On the other
hand, the learned model is non-sparse and thus slower than SVR at
prediction-time.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise added to every fifth
datapoint. The first figure compares the learned model of KRR and SVR when both
complexity/regularization and bandwidth of the RBF kernel are optimized using
grid-search. The learned functions are very similar; however, fitting KRR is
approx. seven times faster than fitting SVR (both with grid-search). However,
prediction of 100000 target values is more than three times faster with SVR
since it has learned a sparse model using only approx. 1/3 of the 100 training
datapoints as support vectors.
The next figure compares the time for fitting and prediction of KRR and SVR for
different sizes of the training set. Fitting KRR is faster than SVR for medium-
sized training sets (less than 1000 samples); however, for larger training sets
SVR scales better. With regard to prediction time, SVR is faster than
KRR for all sizes of the training set because of the learned sparse
solution. Note that the degree of sparsity and thus the prediction time depends
on the parameters epsilon and C of the SVR.
"""
# Authors: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
from __future__ import division
import time
import numpy as np
from sklearn.svm import SVR
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import learning_curve
from sklearn.kernel_ridge import KernelRidge
import matplotlib.pyplot as plt
rng = np.random.RandomState(0)
#############################################################################
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
X_plot = np.linspace(0, 5, 100000)[:, None]
#############################################################################
# Fit regression model
train_size = 100
svr = GridSearchCV(SVR(kernel='rbf', gamma=0.1), cv=5,
param_grid={"C": [1e0, 1e1, 1e2, 1e3],
"gamma": np.logspace(-2, 2, 5)})
kr = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1), cv=5,
param_grid={"alpha": [1e0, 0.1, 1e-2, 1e-3],
"gamma": np.logspace(-2, 2, 5)})
t0 = time.time()
svr.fit(X[:train_size], y[:train_size])
svr_fit = time.time() - t0
print("SVR complexity and bandwidth selected and model fitted in %.3f s"
% svr_fit)
t0 = time.time()
kr.fit(X[:train_size], y[:train_size])
kr_fit = time.time() - t0
print("KRR complexity and bandwidth selected and model fitted in %.3f s"
% kr_fit)
sv_ratio = svr.best_estimator_.support_.shape[0] / train_size
print("Support vector ratio: %.3f" % sv_ratio)
t0 = time.time()
y_svr = svr.predict(X_plot)
svr_predict = time.time() - t0
print("SVR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], svr_predict))
t0 = time.time()
y_kr = kr.predict(X_plot)
kr_predict = time.time() - t0
print("KRR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], kr_predict))
#############################################################################
# look at the results
sv_ind = svr.best_estimator_.support_
plt.scatter(X[sv_ind], y[sv_ind], c='r', s=50, label='SVR support vectors',
zorder=2)
plt.scatter(X[:100], y[:100], c='k', label='data', zorder=1)
plt.hold('on')
plt.plot(X_plot, y_svr, c='r',
label='SVR (fit: %.3fs, predict: %.3fs)' % (svr_fit, svr_predict))
plt.plot(X_plot, y_kr, c='g',
label='KRR (fit: %.3fs, predict: %.3fs)' % (kr_fit, kr_predict))
plt.xlabel('data')
plt.ylabel('target')
plt.title('SVR versus Kernel Ridge')
plt.legend()
# Visualize training and prediction time
plt.figure()
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
sizes = np.logspace(1, 4, 7).astype(int)
for name, estimator in {"KRR": KernelRidge(kernel='rbf', alpha=0.1,
gamma=10),
"SVR": SVR(kernel='rbf', C=1e1, gamma=10)}.items():
train_time = []
test_time = []
for train_test_size in sizes:
t0 = time.time()
estimator.fit(X[:train_test_size], y[:train_test_size])
train_time.append(time.time() - t0)
t0 = time.time()
estimator.predict(X_plot[:1000])
test_time.append(time.time() - t0)
plt.plot(sizes, train_time, 'o-', color="r" if name == "SVR" else "g",
label="%s (train)" % name)
plt.plot(sizes, test_time, 'o--', color="r" if name == "SVR" else "g",
label="%s (test)" % name)
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Train size")
plt.ylabel("Time (seconds)")
plt.title('Execution Time')
plt.legend(loc="best")
# Visualize learning curves
plt.figure()
svr = SVR(kernel='rbf', C=1e1, gamma=0.1)
kr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)
train_sizes, train_scores_svr, test_scores_svr = \
learning_curve(svr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
train_sizes_abs, train_scores_kr, test_scores_kr = \
learning_curve(kr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
plt.plot(train_sizes, test_scores_svr.mean(1), 'o-', color="r",
label="SVR")
plt.plot(train_sizes, test_scores_kr.mean(1), 'o-', color="g",
label="KRR")
plt.xlabel("Train size")
plt.ylabel("Mean Squared Error")
plt.title('Learning curves')
plt.legend(loc="best")
plt.show()
|
bsd-3-clause
|
lancezlin/ml_template_py
|
lib/python2.7/site-packages/matplotlib/scale.py
|
8
|
18857
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import numpy as np
from numpy import ma
from matplotlib.cbook import dedent
from matplotlib.ticker import (NullFormatter, ScalarFormatter,
LogFormatterMathtext, LogitFormatter)
from matplotlib.ticker import (NullLocator, LogLocator, AutoLocator,
SymmetricalLogLocator, LogitLocator)
from matplotlib.transforms import Transform, IdentityTransform
from matplotlib import docstring
class ScaleBase(object):
"""
The base class for all scales.
Scales are separable transformations, working on a single dimension.
Any subclasses will want to override:
- :attr:`name`
- :meth:`get_transform`
- :meth:`set_default_locators_and_formatters`
And optionally:
- :meth:`limit_range_for_scale`
"""
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` object
associated with this scale.
"""
raise NotImplementedError()
def set_default_locators_and_formatters(self, axis):
"""
Set the :class:`~matplotlib.ticker.Locator` and
:class:`~matplotlib.ticker.Formatter` objects on the given
axis to match this scale.
"""
raise NotImplementedError()
def limit_range_for_scale(self, vmin, vmax, minpos):
"""
Returns the range *vmin*, *vmax*, possibly limited to the
domain supported by this scale.
*minpos* should be the minimum positive value in the data.
This is used by log scales to determine a minimum value.
"""
return vmin, vmax
class LinearScale(ScaleBase):
"""
The default linear scale.
"""
name = 'linear'
def __init__(self, axis, **kwargs):
pass
def set_default_locators_and_formatters(self, axis):
"""
Set the locators and formatters to reasonable defaults for
linear scaling.
"""
axis.set_major_locator(AutoLocator())
axis.set_major_formatter(ScalarFormatter())
axis.set_minor_locator(NullLocator())
axis.set_minor_formatter(NullFormatter())
def get_transform(self):
"""
The transform for linear scaling is just the
:class:`~matplotlib.transforms.IdentityTransform`.
"""
return IdentityTransform()
def _mask_non_positives(a):
"""
Return a Numpy array where all non-positive values are
replaced with NaNs. If there are no non-positive values, the
original array is returned.
"""
mask = a <= 0.0
if mask.any():
return np.where(mask, np.nan, a)
return a
def _clip_non_positives(a):
a = np.array(a, float)
a[a <= 0.0] = 1e-300
return a
class LogTransformBase(Transform):
input_dims = 1
output_dims = 1
is_separable = True
has_inverse = True
def __init__(self, nonpos):
Transform.__init__(self)
if nonpos == 'mask':
self._handle_nonpos = _mask_non_positives
else:
self._handle_nonpos = _clip_non_positives
class Log10Transform(LogTransformBase):
base = 10.0
def transform_non_affine(self, a):
a = self._handle_nonpos(a * 10.0)
return np.log10(a)
def inverted(self):
return InvertedLog10Transform()
class InvertedLog10Transform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
has_inverse = True
base = 10.0
def transform_non_affine(self, a):
return ma.power(10.0, a) / 10.0
def inverted(self):
return Log10Transform()
class Log2Transform(LogTransformBase):
base = 2.0
def transform_non_affine(self, a):
a = self._handle_nonpos(a * 2.0)
return np.log2(a)
def inverted(self):
return InvertedLog2Transform()
class InvertedLog2Transform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
has_inverse = True
base = 2.0
def transform_non_affine(self, a):
return ma.power(2.0, a) / 2.0
def inverted(self):
return Log2Transform()
class NaturalLogTransform(LogTransformBase):
base = np.e
def transform_non_affine(self, a):
a = self._handle_nonpos(a * np.e)
return np.log(a)
def inverted(self):
return InvertedNaturalLogTransform()
class InvertedNaturalLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
has_inverse = True
base = np.e
def transform_non_affine(self, a):
return ma.power(np.e, a) / np.e
def inverted(self):
return NaturalLogTransform()
class LogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
has_inverse = True
def __init__(self, base, nonpos):
Transform.__init__(self)
self.base = base
if nonpos == 'mask':
self._handle_nonpos = _mask_non_positives
else:
self._handle_nonpos = _clip_non_positives
def transform_non_affine(self, a):
a = self._handle_nonpos(a * self.base)
return np.log(a) / np.log(self.base)
def inverted(self):
return InvertedLogTransform(self.base)
class InvertedLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
has_inverse = True
def __init__(self, base):
Transform.__init__(self)
self.base = base
def transform_non_affine(self, a):
return ma.power(self.base, a) / self.base
def inverted(self):
return LogTransform(self.base)
class LogScale(ScaleBase):
"""
A standard logarithmic scale. Care is taken so non-positive
values are not plotted.
For computational efficiency (to push as much as possible to Numpy
C code in the common cases), this scale provides different
transforms depending on the base of the logarithm:
- base 10 (:class:`Log10Transform`)
- base 2 (:class:`Log2Transform`)
- base e (:class:`NaturalLogTransform`)
- arbitrary base (:class:`LogTransform`)
"""
name = 'log'
# compatibility shim
LogTransformBase = LogTransformBase
Log10Transform = Log10Transform
InvertedLog10Transform = InvertedLog10Transform
Log2Transform = Log2Transform
InvertedLog2Transform = InvertedLog2Transform
NaturalLogTransform = NaturalLogTransform
InvertedNaturalLogTransform = InvertedNaturalLogTransform
LogTransform = LogTransform
InvertedLogTransform = InvertedLogTransform
def __init__(self, axis, **kwargs):
"""
*basex*/*basey*:
The base of the logarithm
*nonposx*/*nonposy*: ['mask' | 'clip' ]
non-positive values in *x* or *y* can be masked as
invalid, or clipped to a very small positive number
*subsx*/*subsy*:
Where to place the subticks between each major tick.
Should be a sequence of integers. For example, in a log10
scale: ``[2, 3, 4, 5, 6, 7, 8, 9]``
will place 8 logarithmically spaced minor ticks between
each major tick.
"""
if axis.axis_name == 'x':
base = kwargs.pop('basex', 10.0)
subs = kwargs.pop('subsx', None)
nonpos = kwargs.pop('nonposx', 'mask')
else:
base = kwargs.pop('basey', 10.0)
subs = kwargs.pop('subsy', None)
nonpos = kwargs.pop('nonposy', 'mask')
if nonpos not in ['mask', 'clip']:
raise ValueError("nonposx, nonposy kwarg must be 'mask' or 'clip'")
if base == 10.0:
self._transform = self.Log10Transform(nonpos)
elif base == 2.0:
self._transform = self.Log2Transform(nonpos)
elif base == np.e:
self._transform = self.NaturalLogTransform(nonpos)
else:
self._transform = self.LogTransform(base, nonpos)
self.base = base
self.subs = subs
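# A minimal usage sketch (assuming the usual Axes API of this matplotlib
# version); the keyword arguments documented above normally arrive via e.g.
#   ax.set_xscale('log', basex=2, nonposx='clip', subsx=[2, 4, 8])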
def set_default_locators_and_formatters(self, axis):
"""
Set the locators and formatters to specialized versions for
log scaling.
"""
axis.set_major_locator(LogLocator(self.base))
axis.set_major_formatter(LogFormatterMathtext(self.base))
axis.set_minor_locator(LogLocator(self.base, self.subs))
axis.set_minor_formatter(NullFormatter())
def get_transform(self):
"""
Return a :class:`~matplotlib.transforms.Transform` instance
appropriate for the given logarithm base.
"""
return self._transform
def limit_range_for_scale(self, vmin, vmax, minpos):
"""
Limit the domain to positive values.
"""
return (vmin <= 0.0 and minpos or vmin,
vmax <= 0.0 and minpos or vmax)
class SymmetricalLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
has_inverse = True
def __init__(self, base, linthresh, linscale):
Transform.__init__(self)
self.base = base
self.linthresh = linthresh
self.linscale = linscale
self._linscale_adj = (linscale / (1.0 - self.base ** -1))
self._log_base = np.log(base)
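# With c = self._linscale_adj, the forward transform implemented below is
#   f(a) = a * c                                                 for |a| <= linthresh
#   f(a) = sign(a) * linthresh * (c + log_base(|a| / linthresh)) for |a| >  linthresh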
def transform_non_affine(self, a):
sign = np.sign(a)
masked = ma.masked_inside(a,
-self.linthresh,
self.linthresh,
copy=False)
log = sign * self.linthresh * (
self._linscale_adj +
ma.log(np.abs(masked) / self.linthresh) / self._log_base)
if masked.mask.any():
return ma.where(masked.mask, a * self._linscale_adj, log)
else:
return log
def inverted(self):
return InvertedSymmetricalLogTransform(self.base, self.linthresh,
self.linscale)
class InvertedSymmetricalLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
has_inverse = True
def __init__(self, base, linthresh, linscale):
Transform.__init__(self)
symlog = SymmetricalLogTransform(base, linthresh, linscale)
self.base = base
self.linthresh = linthresh
self.invlinthresh = symlog.transform(linthresh)
self.linscale = linscale
self._linscale_adj = (linscale / (1.0 - self.base ** -1))
def transform_non_affine(self, a):
sign = np.sign(a)
masked = ma.masked_inside(a, -self.invlinthresh,
self.invlinthresh, copy=False)
exp = sign * self.linthresh * (
ma.power(self.base, (sign * (masked / self.linthresh))
- self._linscale_adj))
if masked.mask.any():
return ma.where(masked.mask, a / self._linscale_adj, exp)
else:
return exp
def inverted(self):
return SymmetricalLogTransform(self.base,
self.linthresh, self.linscale)
class SymmetricalLogScale(ScaleBase):
"""
The symmetrical logarithmic scale is logarithmic in both the
positive and negative directions from the origin.
Since the values close to zero tend toward infinity, there is a
need to have a range around zero that is linear. The parameter
*linthresh* allows the user to specify the size of this range
(-*linthresh*, *linthresh*).
"""
name = 'symlog'
# compatibility shim
SymmetricalLogTransform = SymmetricalLogTransform
InvertedSymmetricalLogTransform = InvertedSymmetricalLogTransform
def __init__(self, axis, **kwargs):
"""
*basex*/*basey*:
The base of the logarithm
*linthreshx*/*linthreshy*:
The range (-*x*, *x*) within which the plot is linear (to
avoid having the plot go to infinity around zero).
*subsx*/*subsy*:
Where to place the subticks between each major tick.
Should be a sequence of integers. For example, in a log10
scale: ``[2, 3, 4, 5, 6, 7, 8, 9]``
will place 8 logarithmically spaced minor ticks between
each major tick.
*linscalex*/*linscaley*:
This allows the linear range (-*linthresh* to *linthresh*)
to be stretched relative to the logarithmic range. Its
value is the number of decades to use for each half of the
linear range. For example, when *linscale* == 1.0 (the
default), the space used for the positive and negative
halves of the linear range will be equal to one decade in
the logarithmic range.
"""
if axis.axis_name == 'x':
base = kwargs.pop('basex', 10.0)
linthresh = kwargs.pop('linthreshx', 2.0)
subs = kwargs.pop('subsx', None)
linscale = kwargs.pop('linscalex', 1.0)
else:
base = kwargs.pop('basey', 10.0)
linthresh = kwargs.pop('linthreshy', 2.0)
subs = kwargs.pop('subsy', None)
linscale = kwargs.pop('linscaley', 1.0)
if base <= 1.0:
raise ValueError("'basex/basey' must be larger than 1")
if linthresh <= 0.0:
raise ValueError("'linthreshx/linthreshy' must be positive")
if linscale <= 0.0:
raise ValueError("'linscalex/linscaley' must be positive")
self._transform = self.SymmetricalLogTransform(base,
linthresh,
linscale)
self.base = base
self.linthresh = linthresh
self.linscale = linscale
self.subs = subs
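# A minimal usage sketch (assuming the usual Axes API of this matplotlib
# version):
#   ax.set_yscale('symlog', linthreshy=0.1, basey=10, linscaley=1.0)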
def set_default_locators_and_formatters(self, axis):
"""
Set the locators and formatters to specialized versions for
symmetrical log scaling.
"""
axis.set_major_locator(SymmetricalLogLocator(self.get_transform()))
axis.set_major_formatter(LogFormatterMathtext(self.base))
axis.set_minor_locator(SymmetricalLogLocator(self.get_transform(),
self.subs))
axis.set_minor_formatter(NullFormatter())
def get_transform(self):
"""
Return a :class:`SymmetricalLogTransform` instance.
"""
return self._transform
def _mask_non_logit(a):
"""
Return a Numpy array where all values outside ]0, 1[ are
replaced with NaNs. If all values are inside ]0, 1[, the original
array is returned.
"""
mask = (a <= 0.0) | (a >= 1.0)
if mask.any():
return np.where(mask, np.nan, a)
return a
def _clip_non_logit(a):
a = np.array(a, float)
a[a <= 0.0] = 1e-300
a[a >= 1.0] = 1 - 1e-300
return a
class LogitTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
has_inverse = True
def __init__(self, nonpos):
Transform.__init__(self)
if nonpos == 'mask':
self._handle_nonpos = _mask_non_logit
else:
self._handle_nonpos = _clip_non_logit
self._nonpos = nonpos
def transform_non_affine(self, a):
"""logit transform (base 10), masked or clipped"""
a = self._handle_nonpos(a)
return np.log10(1.0 * a / (1.0 - a))
def inverted(self):
return LogisticTransform(self._nonpos)
class LogisticTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
has_inverse = True
def __init__(self, nonpos='mask'):
Transform.__init__(self)
self._nonpos = nonpos
def transform_non_affine(self, a):
"""logistic transform (base 10)"""
return 1.0 / (1 + 10**(-a))
def inverted(self):
return LogitTransform(self._nonpos)
class LogitScale(ScaleBase):
"""
Logit scale for data between zero and one, both excluded.
This scale is similar to a log scale close to zero and to one, and almost
linear around 0.5. It maps the interval ]0, 1[ onto ]-infty, +infty[.
"""
name = 'logit'
def __init__(self, axis, nonpos='mask'):
"""
*nonpos*: ['mask' | 'clip' ]
values beyond ]0, 1[ can be masked as invalid, or clipped to a number
very close to 0 or 1
"""
if nonpos not in ['mask', 'clip']:
raise ValueError("nonposx, nonposy kwarg must be 'mask' or 'clip'")
self._transform = LogitTransform(nonpos)
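# A minimal usage sketch (assuming the usual Axes API of this matplotlib
# version), e.g. for probabilities strictly inside (0, 1):
#   ax.set_yscale('logit', nonpos='clip')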
def get_transform(self):
"""
Return a :class:`LogitTransform` instance.
"""
return self._transform
def set_default_locators_and_formatters(self, axis):
# ..., 0.01, 0.1, 0.5, 0.9, 0.99, ...
axis.set_major_locator(LogitLocator())
axis.set_major_formatter(LogitFormatter())
axis.set_minor_locator(LogitLocator(minor=True))
axis.set_minor_formatter(LogitFormatter())
def limit_range_for_scale(self, vmin, vmax, minpos):
"""
Limit the domain to values between 0 and 1 (excluded).
"""
return (vmin <= 0 and minpos or vmin,
vmax >= 1 and (1 - minpos) or vmax)
_scale_mapping = {
'linear': LinearScale,
'log': LogScale,
'symlog': SymmetricalLogScale,
'logit': LogitScale,
}
def get_scale_names():
names = list(six.iterkeys(_scale_mapping))
names.sort()
return names
def scale_factory(scale, axis, **kwargs):
"""
Return a scale class by name.
ACCEPTS: [ %(names)s ]
"""
if scale is None:
scale = 'linear'
scale = scale.lower()
if scale not in _scale_mapping:
raise ValueError("Unknown scale type '%s'" % scale)
return _scale_mapping[scale](axis, **kwargs)
scale_factory.__doc__ = dedent(scale_factory.__doc__) % \
{'names': " | ".join(get_scale_names())}
def register_scale(scale_class):
"""
Register a new kind of scale.
*scale_class* must be a subclass of :class:`ScaleBase`.
"""
_scale_mapping[scale_class.name] = scale_class
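# A hypothetical sketch of how a custom scale plugs into this registry
# (SqrtScale/SqrtTransform are illustrative names, not part of matplotlib):
#
#   class SqrtScale(ScaleBase):
#       name = 'sqrt'
#       def get_transform(self):
#           return SqrtTransform()  # a Transform mapping data -> sqrt(data)
#       def set_default_locators_and_formatters(self, axis):
#           axis.set_major_locator(AutoLocator())
#           axis.set_major_formatter(ScalarFormatter())
#
#   register_scale(SqrtScale)
#   # afterwards: ax.set_xscale('sqrt')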
def get_scale_docs():
"""
Helper function for generating docstrings related to scales.
"""
docs = []
for name in get_scale_names():
scale_class = _scale_mapping[name]
docs.append(" '%s'" % name)
docs.append("")
class_docs = dedent(scale_class.__init__.__doc__)
class_docs = "".join([" %s\n" %
x for x in class_docs.split("\n")])
docs.append(class_docs)
docs.append("")
return "\n".join(docs)
docstring.interpd.update(
scale=' | '.join([repr(x) for x in get_scale_names()]),
scale_docs=get_scale_docs().rstrip(),
)
|
mit
|
xuleiboy1234/autoTitle
|
tensorflow/tensorflow/contrib/learn/python/learn/learn_io/pandas_io_test.py
|
111
|
7865
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pandas_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.learn_io import pandas_io
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class PandasIoTest(test.TestCase):
def makeTestDataFrame(self):
index = np.arange(100, 104)
a = np.arange(4)
b = np.arange(32, 36)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -28), index=index)
return x, y
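# Helper: build the input_fn's output tensors, start the queue-runner threads,
# fetch a single batch, then shut the coordinator down cleanly.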
def callInputFnOnce(self, input_fn, session):
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
result_values = session.run(results)
coord.request_stop()
coord.join(threads)
return result_values
def testPandasInputFn_IndexMismatch(self):
if not HAS_PANDAS:
return
x, _ = self.makeTestDataFrame()
y_noindex = pd.Series(np.arange(-32, -28))
with self.assertRaises(ValueError):
pandas_io.pandas_input_fn(
x, y_noindex, batch_size=2, shuffle=False, num_epochs=1)
def testPandasInputFn_ProducesExpectedOutputs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
def testPandasInputFn_ProducesOutputsForLargeBatchAndMultipleEpochs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 102)
a = np.arange(2)
b = np.arange(32, 34)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -30), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=128, shuffle=False, num_epochs=2)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1, 0, 1])
self.assertAllEqual(features['b'], [32, 33, 32, 33])
self.assertAllEqual(target, [-32, -31, -32, -31])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_ProducesOutputsWhenDataSizeNotDividedByBatchSize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 105)
a = np.arange(5)
b = np.arange(32, 37)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -27), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
features, target = session.run(results)
self.assertAllEqual(features['a'], [2, 3])
self.assertAllEqual(features['b'], [34, 35])
self.assertAllEqual(target, [-30, -29])
features, target = session.run(results)
self.assertAllEqual(features['a'], [4])
self.assertAllEqual(features['b'], [36])
self.assertAllEqual(target, [-28])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_OnlyX(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, _ = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y=None, batch_size=2, shuffle=False, num_epochs=1)
features = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
def testPandasInputFn_ExcludesIndex(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, _ = self.callInputFnOnce(input_fn, session)
self.assertFalse('index' in features)
def assertInputsCallableNTimes(self, input_fn, session, n):
inputs = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
for _ in range(n):
session.run(inputs)
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_RespectsEpoch_NoShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=False, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=True, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffleAutosize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, queue_capacity=None, num_epochs=2)
self.assertInputsCallableNTimes(input_fn, session, 4)
def testPandasInputFn_RespectsEpochUnevenBatches(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
with self.test_session() as session:
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=3, shuffle=False, num_epochs=1)
# Before the last batch, only one element of the epoch should remain.
self.assertInputsCallableNTimes(input_fn, session, 2)
def testPandasInputFn_Idempotent(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, num_epochs=1)()
if __name__ == '__main__':
test.main()
|
mit
|
MatthewThe/kaggle
|
titanic/bin/visualise.py
|
1
|
1648
|
#!/usr/bin/python
import csv
import matplotlib.pyplot as plt
import numpy as np
idx = 0
# 0: PassengerId, 1: Survived, 2: Pclass, 3: Name, 4: Sex (male,female), 5: Age, 6: SibSp, 7: ParCh, 8: Ticket, 9: Fare, 10: Cabin, 11: Embarked (S,C,Q)
# 1,0,3,"Braund, Mr. Owen Harris",male,22,1,0,A/5 21171,7.25,,S
def plotHist(col, edges, title):
global idx
x = []
y = []
missingCount = 0
reader = csv.reader(open('../data/train.csv', 'rb'))
headers = reader.next()
for row in reader:
y.append(int(row[1]))
if len(row[col]) > 0:
if col == 4:
row[col] = 1 if row[col] == "male" else 0
elif col == 11:
m = {"S": 1, "C": 2, "Q": 3}
row[col] = m[row[col]]
x.append(float(row[col]))
else:
x.append(-8.0)
missingCount += 1
if idx == 0:
print "Total"
print "#Passengers", len(y)
print "Survived", sum(y)
print "Deceased", len(y) - sum(y)
print ""
print title
print "#Missing value", missingCount
idx += 1
plt.subplot(3,3,idx)
plt.title(title)
plt.hist([a for a,b in zip(x,y) if b == 1], bins = edges, alpha = 0.5, label = 'survived')
plt.hist([a for a,b in zip(x,y) if b == 0], bins = edges, alpha = 0.5, label = 'deceased')
plt.legend()
# Pclass
plotHist(2, np.arange(0.5,4.5,1), "Pclass")
# Gender
plotHist(4, np.arange(-0.5,2.5,1), "Gender")
# Age
plotHist(5, range(-10,100,5), "Age")
# SibSp
plotHist(6, np.arange(-0.5,9.5,1), "Siblings/Spouse")
# ParCh
plotHist(7, np.arange(-0.5,9.5,1), "Parents/Children")
# Fare
plotHist(9, range(0,200,10), "Fare")
# Embarked
plotHist(11, np.arange(0.5,4.5,1), "Embarked")
plt.show()
|
apache-2.0
|
Ziqi-Li/bknqgis
|
pandas/pandas/tests/dtypes/test_generic.py
|
2
|
3714
|
# -*- coding: utf-8 -*-
from warnings import catch_warnings
import numpy as np
import pandas as pd
from pandas.core.dtypes import generic as gt
from pandas.util import testing as tm
class TestABCClasses(object):
tuples = [[1, 2, 2], ['red', 'blue', 'red']]
multi_index = pd.MultiIndex.from_arrays(tuples, names=('number', 'color'))
datetime_index = pd.to_datetime(['2000/1/1', '2010/1/1'])
timedelta_index = pd.to_timedelta(np.arange(5), unit='s')
period_index = pd.period_range('2000/1/1', '2010/1/1', freq='M')
categorical = pd.Categorical([1, 2, 3], categories=[2, 3, 1])
categorical_df = pd.DataFrame({"values": [1, 2, 3]}, index=categorical)
df = pd.DataFrame({'names': ['a', 'b', 'c']}, index=multi_index)
sparse_series = pd.Series([1, 2, 3]).to_sparse()
sparse_array = pd.SparseArray(np.random.randn(10))
def test_abc_types(self):
assert isinstance(pd.Index(['a', 'b', 'c']), gt.ABCIndex)
assert isinstance(pd.Int64Index([1, 2, 3]), gt.ABCInt64Index)
assert isinstance(pd.UInt64Index([1, 2, 3]), gt.ABCUInt64Index)
assert isinstance(pd.Float64Index([1, 2, 3]), gt.ABCFloat64Index)
assert isinstance(self.multi_index, gt.ABCMultiIndex)
assert isinstance(self.datetime_index, gt.ABCDatetimeIndex)
assert isinstance(self.timedelta_index, gt.ABCTimedeltaIndex)
assert isinstance(self.period_index, gt.ABCPeriodIndex)
assert isinstance(self.categorical_df.index, gt.ABCCategoricalIndex)
assert isinstance(pd.Index(['a', 'b', 'c']), gt.ABCIndexClass)
assert isinstance(pd.Int64Index([1, 2, 3]), gt.ABCIndexClass)
assert isinstance(pd.Series([1, 2, 3]), gt.ABCSeries)
assert isinstance(self.df, gt.ABCDataFrame)
with catch_warnings(record=True):
assert isinstance(self.df.to_panel(), gt.ABCPanel)
assert isinstance(self.sparse_series, gt.ABCSparseSeries)
assert isinstance(self.sparse_array, gt.ABCSparseArray)
assert isinstance(self.categorical, gt.ABCCategorical)
assert isinstance(pd.Period('2012', freq='A-DEC'), gt.ABCPeriod)
assert isinstance(pd.DateOffset(), gt.ABCDateOffset)
assert isinstance(pd.Period('2012', freq='A-DEC').freq,
gt.ABCDateOffset)
assert not isinstance(pd.Period('2012', freq='A-DEC'),
gt.ABCDateOffset)
def test_setattr_warnings():
# GH5904 - Suggestion: Warning for DataFrame colname-methodname clash
# GH7175 - GOTCHA: You can't use dot notation to add a column...
d = {'one': pd.Series([1., 2., 3.], index=['a', 'b', 'c']),
'two': pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd'])}
df = pd.DataFrame(d)
with catch_warnings(record=True) as w:
# successfully add new column
# this should not raise a warning
df['three'] = df.two + 1
assert len(w) == 0
assert df.three.sum() > df.two.sum()
with catch_warnings(record=True) as w:
# successfully modify column in place
# this should not raise a warning
df.one += 1
assert len(w) == 0
assert df.one.iloc[0] == 2
with catch_warnings(record=True) as w:
# successfully add an attribute to a series
# this should not raise a warning
df.two.not_an_index = [1, 2]
assert len(w) == 0
with tm.assert_produces_warning(UserWarning):
# warn when setting column to nonexistent name
df.four = df.two + 2
assert df.four.sum() > df.two.sum()
with tm.assert_produces_warning(UserWarning):
# warn when column has same name as method
df['sum'] = df.two
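# ---------------------------------------------------------------------------
# Editor's example: a stand-alone sketch (not part of the pandas test suite)
# of the gotcha exercised by test_setattr_warnings above -- attribute
# assignment does not create a DataFrame column, it only sets an attribute.
if __name__ == "__main__":
    demo = pd.DataFrame({"two": [1.0, 2.0, 3.0]})
    with catch_warnings(record=True):
        demo.four = demo.two + 2      # pandas warns; no column is created
    print("four" in demo.columns)     # expected: False
    demo["four"] = demo.two + 2       # this is how a new column is added
    print("four" in demo.columns)     # expected: True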
|
gpl-2.0
|
datapythonista/pandas
|
pandas/tests/series/methods/test_sort_index.py
|
2
|
12500
|
import random
import numpy as np
import pytest
from pandas import (
DatetimeIndex,
IntervalIndex,
MultiIndex,
Series,
)
import pandas._testing as tm
@pytest.fixture(params=["quicksort", "mergesort", "heapsort", "stable"])
def sort_kind(request):
return request.param
class TestSeriesSortIndex:
def test_sort_index_name(self, datetime_series):
result = datetime_series.sort_index(ascending=False)
assert result.name == datetime_series.name
def test_sort_index(self, datetime_series):
datetime_series.index = datetime_series.index._with_freq(None)
rindex = list(datetime_series.index)
random.shuffle(rindex)
random_order = datetime_series.reindex(rindex)
sorted_series = random_order.sort_index()
tm.assert_series_equal(sorted_series, datetime_series)
# descending
sorted_series = random_order.sort_index(ascending=False)
tm.assert_series_equal(
sorted_series, datetime_series.reindex(datetime_series.index[::-1])
)
# compat on level
sorted_series = random_order.sort_index(level=0)
tm.assert_series_equal(sorted_series, datetime_series)
# compat on axis
sorted_series = random_order.sort_index(axis=0)
tm.assert_series_equal(sorted_series, datetime_series)
msg = "No axis named 1 for object type Series"
with pytest.raises(ValueError, match=msg):
random_order.sort_values(axis=1)
sorted_series = random_order.sort_index(level=0, axis=0)
tm.assert_series_equal(sorted_series, datetime_series)
with pytest.raises(ValueError, match=msg):
random_order.sort_index(level=0, axis=1)
def test_sort_index_inplace(self, datetime_series):
datetime_series.index = datetime_series.index._with_freq(None)
# For GH#11402
rindex = list(datetime_series.index)
random.shuffle(rindex)
# descending
random_order = datetime_series.reindex(rindex)
result = random_order.sort_index(ascending=False, inplace=True)
assert result is None
expected = datetime_series.reindex(datetime_series.index[::-1])
expected.index = expected.index._with_freq(None)
tm.assert_series_equal(random_order, expected)
# ascending
random_order = datetime_series.reindex(rindex)
result = random_order.sort_index(ascending=True, inplace=True)
assert result is None
expected = datetime_series.copy()
expected.index = expected.index._with_freq(None)
tm.assert_series_equal(random_order, expected)
def test_sort_index_level(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC"))
s = Series([1, 2], mi)
backwards = s.iloc[[1, 0]]
res = s.sort_index(level="A")
tm.assert_series_equal(backwards, res)
res = s.sort_index(level=["A", "B"])
tm.assert_series_equal(backwards, res)
res = s.sort_index(level="A", sort_remaining=False)
tm.assert_series_equal(s, res)
res = s.sort_index(level=["A", "B"], sort_remaining=False)
tm.assert_series_equal(s, res)
@pytest.mark.parametrize("level", ["A", 0]) # GH#21052
def test_sort_index_multiindex(self, level):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC"))
s = Series([1, 2], mi)
backwards = s.iloc[[1, 0]]
# implicit sort_remaining=True
res = s.sort_index(level=level)
tm.assert_series_equal(backwards, res)
# GH#13496
# sort has no effect without remaining lvls
res = s.sort_index(level=level, sort_remaining=False)
tm.assert_series_equal(s, res)
def test_sort_index_kind(self, sort_kind):
# GH#14444 & GH#13589: Add support for sort algo choosing
series = Series(index=[3, 2, 1, 4, 3], dtype=object)
expected_series = Series(index=[1, 2, 3, 3, 4], dtype=object)
index_sorted_series = series.sort_index(kind=sort_kind)
tm.assert_series_equal(expected_series, index_sorted_series)
def test_sort_index_na_position(self):
series = Series(index=[3, 2, 1, 4, 3, np.nan], dtype=object)
expected_series_first = Series(index=[np.nan, 1, 2, 3, 3, 4], dtype=object)
index_sorted_series = series.sort_index(na_position="first")
tm.assert_series_equal(expected_series_first, index_sorted_series)
expected_series_last = Series(index=[1, 2, 3, 3, 4, np.nan], dtype=object)
index_sorted_series = series.sort_index(na_position="last")
tm.assert_series_equal(expected_series_last, index_sorted_series)
def test_sort_index_intervals(self):
s = Series(
[np.nan, 1, 2, 3], IntervalIndex.from_arrays([0, 1, 2, 3], [1, 2, 3, 4])
)
result = s.sort_index()
expected = s
tm.assert_series_equal(result, expected)
result = s.sort_index(ascending=False)
expected = Series(
[3, 2, 1, np.nan], IntervalIndex.from_arrays([3, 2, 1, 0], [4, 3, 2, 1])
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize(
"original_list, sorted_list, ascending, ignore_index, output_index",
[
([2, 3, 6, 1], [2, 3, 6, 1], True, True, [0, 1, 2, 3]),
([2, 3, 6, 1], [2, 3, 6, 1], True, False, [0, 1, 2, 3]),
([2, 3, 6, 1], [1, 6, 3, 2], False, True, [0, 1, 2, 3]),
([2, 3, 6, 1], [1, 6, 3, 2], False, False, [3, 2, 1, 0]),
],
)
def test_sort_index_ignore_index(
self, inplace, original_list, sorted_list, ascending, ignore_index, output_index
):
# GH 30114
ser = Series(original_list)
expected = Series(sorted_list, index=output_index)
kwargs = {
"ascending": ascending,
"ignore_index": ignore_index,
"inplace": inplace,
}
if inplace:
result_ser = ser.copy()
result_ser.sort_index(**kwargs)
else:
result_ser = ser.sort_index(**kwargs)
tm.assert_series_equal(result_ser, expected)
tm.assert_series_equal(ser, Series(original_list))
def test_sort_index_ascending_list(self):
# GH#16934
# Set up a Series with a three level MultiIndex
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
[4, 3, 2, 1, 4, 3, 2, 1],
]
tuples = zip(*arrays)
mi = MultiIndex.from_tuples(tuples, names=["first", "second", "third"])
ser = Series(range(8), index=mi)
# Sort with boolean ascending
result = ser.sort_index(level=["third", "first"], ascending=False)
expected = ser.iloc[[4, 0, 5, 1, 6, 2, 7, 3]]
tm.assert_series_equal(result, expected)
# Sort with list of boolean ascending
result = ser.sort_index(level=["third", "first"], ascending=[False, True])
expected = ser.iloc[[0, 4, 1, 5, 2, 6, 3, 7]]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"ascending",
[
None,
(True, None),
(False, "True"),
],
)
def test_sort_index_ascending_bad_value_raises(self, ascending):
ser = Series(range(10), index=[0, 3, 2, 1, 4, 5, 7, 6, 8, 9])
match = 'For argument "ascending" expected type bool'
with pytest.raises(ValueError, match=match):
ser.sort_index(ascending=ascending)
class TestSeriesSortIndexKey:
def test_sort_index_multiindex_key(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC"))
s = Series([1, 2], mi)
backwards = s.iloc[[1, 0]]
result = s.sort_index(level="C", key=lambda x: -x)
tm.assert_series_equal(s, result)
result = s.sort_index(level="C", key=lambda x: x) # nothing happens
tm.assert_series_equal(backwards, result)
def test_sort_index_multiindex_key_multi_level(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC"))
s = Series([1, 2], mi)
backwards = s.iloc[[1, 0]]
result = s.sort_index(level=["A", "C"], key=lambda x: -x)
tm.assert_series_equal(s, result)
result = s.sort_index(level=["A", "C"], key=lambda x: x) # nothing happens
tm.assert_series_equal(backwards, result)
def test_sort_index_key(self):
series = Series(np.arange(6, dtype="int64"), index=list("aaBBca"))
result = series.sort_index()
expected = series.iloc[[2, 3, 0, 1, 5, 4]]
tm.assert_series_equal(result, expected)
result = series.sort_index(key=lambda x: x.str.lower())
expected = series.iloc[[0, 1, 5, 2, 3, 4]]
tm.assert_series_equal(result, expected)
result = series.sort_index(key=lambda x: x.str.lower(), ascending=False)
expected = series.iloc[[4, 2, 3, 0, 1, 5]]
tm.assert_series_equal(result, expected)
def test_sort_index_key_int(self):
series = Series(np.arange(6, dtype="int64"), index=np.arange(6, dtype="int64"))
result = series.sort_index()
tm.assert_series_equal(result, series)
result = series.sort_index(key=lambda x: -x)
expected = series.sort_index(ascending=False)
tm.assert_series_equal(result, expected)
result = series.sort_index(key=lambda x: 2 * x)
tm.assert_series_equal(result, series)
def test_sort_index_kind_key(self, sort_kind, sort_by_key):
# GH #14444 & #13589: Add support for sort algo choosing
series = Series(index=[3, 2, 1, 4, 3], dtype=object)
expected_series = Series(index=[1, 2, 3, 3, 4], dtype=object)
index_sorted_series = series.sort_index(kind=sort_kind, key=sort_by_key)
tm.assert_series_equal(expected_series, index_sorted_series)
def test_sort_index_kind_neg_key(self, sort_kind):
# GH #14444 & #13589: Add support for sort algo choosing
series = Series(index=[3, 2, 1, 4, 3], dtype=object)
expected_series = Series(index=[4, 3, 3, 2, 1], dtype=object)
index_sorted_series = series.sort_index(kind=sort_kind, key=lambda x: -x)
tm.assert_series_equal(expected_series, index_sorted_series)
def test_sort_index_na_position_key(self, sort_by_key):
series = Series(index=[3, 2, 1, 4, 3, np.nan], dtype=object)
expected_series_first = Series(index=[np.nan, 1, 2, 3, 3, 4], dtype=object)
index_sorted_series = series.sort_index(na_position="first", key=sort_by_key)
tm.assert_series_equal(expected_series_first, index_sorted_series)
expected_series_last = Series(index=[1, 2, 3, 3, 4, np.nan], dtype=object)
index_sorted_series = series.sort_index(na_position="last", key=sort_by_key)
tm.assert_series_equal(expected_series_last, index_sorted_series)
def test_changes_length_raises(self):
s = Series([1, 2, 3])
with pytest.raises(ValueError, match="change the shape"):
s.sort_index(key=lambda x: x[:1])
def test_sort_values_key_type(self):
s = Series([1, 2, 3], DatetimeIndex(["2008-10-24", "2008-11-23", "2007-12-22"]))
result = s.sort_index(key=lambda x: x.month)
expected = s.iloc[[0, 1, 2]]
tm.assert_series_equal(result, expected)
result = s.sort_index(key=lambda x: x.day)
expected = s.iloc[[2, 1, 0]]
tm.assert_series_equal(result, expected)
result = s.sort_index(key=lambda x: x.year)
expected = s.iloc[[2, 0, 1]]
tm.assert_series_equal(result, expected)
result = s.sort_index(key=lambda x: x.month_name())
expected = s.iloc[[2, 1, 0]]
tm.assert_series_equal(result, expected)
def test_sort_index_pos_args_deprecation(self):
# https://github.com/pandas-dev/pandas/issues/41485
ser = Series([1, 2, 3])
msg = (
r"In a future version of pandas all arguments of Series.sort_index "
r"will be keyword-only"
)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = ser.sort_index(0)
expected = Series([1, 2, 3])
tm.assert_series_equal(result, expected)
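# ---------------------------------------------------------------------------
# Editor's example: a small stand-alone sketch (not part of the pandas test
# suite) of the key= behaviour exercised above.  The key callable receives the
# whole Index and must return an Index-like of the same length.
if __name__ == "__main__":
    demo = Series([0, 1, 2], index=["b", "A", "C"])
    # Default ordering is case-sensitive (uppercase sorts first): A, C, b
    print(demo.sort_index())
    # A case-insensitive key gives: A, b, C
    print(demo.sort_index(key=lambda idx: idx.str.lower()))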
|
bsd-3-clause
|
kharris/prebotc-graph-model
|
postprocessing_preBotBot/doCollectionPlots.py
|
1
|
2612
|
#!/usr/bin/env python
import scipy.io
import argparse
import sys
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import os.path
import os
'''
This file reads a given .mat file containing the final metrics over all runs and
plots them with standard deviation error bars.
'''
def parse_args(argv):
#parsing
parser = argparse.ArgumentParser(prog ="collectionPlots",
description = 'plotting collection data')
parser.add_argument('input', help='file containing collection data (.mat)')
parser.add_argument('output', help='output filename .jpeg (graph type will be appended)')
args = parser.parse_args(argv[1:])
return args.input, args.output
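# Editor's note -- a usage sketch with hypothetical file names (they are not
# taken from the original project):
#
#     python doCollectionPlots.py sweep_metrics.mat sweep_summary.jpeg
#
# The input .mat file is expected to contain the 'phase_lag_pop_corr' array
# that main() loads below.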
'''
This arranges the data from the .mat file, extracts the population correlation and phase lags,
and computes the standard deviation and mean of each for plotting.
'''
def arrange_popcor_phaselag(data,start_of_sweep,end_of_sweep,steps):
std_dev_phase = []
std_dev_pop = []
phase = []
pop = []
sweep_values = []
## Sweep through the parameter values covered by the sweep
for i in np.arange(start_of_sweep,end_of_sweep,steps):
temp_phase = []
temp_corr = []
for j in data:
if float(str( j[0][0])) == i:
temp_phase.append(j[1][0])
temp_corr.append(j[2][0])
##stores the means and standard deviations of each sweep value
sweep_values.append(i)
phase.append(np.mean(temp_phase))
pop.append(np.mean(temp_corr))
std_dev_phase.append(np.std(temp_phase))
std_dev_pop.append(np.std(temp_corr))
return std_dev_phase,std_dev_pop,phase,pop,sweep_values
def main(argv=None):
if argv is None:
argv = sys.argv
(in_fn,out_fn) = parse_args(argv)
data = scipy.io.loadmat(in_fn)
##Get the values for the pop correlation and phase lag
(std_dev_phase,std_dev_pop,phase,pop,sweep_values) = arrange_popcor_phaselag(data['phase_lag_pop_corr'],0,3.25,.25)
###Graph the data
plt.figure(1)
ax = plt.subplot(111)
plt.errorbar(sweep_values,phase,std_dev_phase,marker = 'o' ,label ="Phase Shift")
plt.errorbar(sweep_values,pop,std_dev_pop,c='r',marker= 'o',label="Pop Correlation")
plt.title('Sweep of Block structure')
plt.xlabel('idegree')
plt.ylabel('Phase or Strength Value')
plt.axis([-.2,3.2,0,1])
ax.legend()
plt.savefig(out_fn)
if __name__ == '__main__':
status = main()
sys.exit(status)
|
bsd-3-clause
|
pompiduskus/scikit-learn
|
examples/semi_supervised/plot_label_propagation_versus_svm_iris.py
|
286
|
2378
|
"""
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison for decision boundary generated on iris dataset
between Label Propagation and SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
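# ---------------------------------------------------------------------------
# Editor's example (a sketch, not part of the original gallery script): a
# quantitative companion to the plots -- how many of the points hidden from
# the 30%-labeled model are recovered correctly, via its transduction_
# attribute.
hidden = y_30 == -1
recovered = ls30[0].transduction_[hidden]
print("Recovered labels on hidden points (30%% labeled): %.3f"
      % np.mean(recovered == y[hidden]))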
|
bsd-3-clause
|
emilroz/openmicroscopy
|
components/tools/OmeroPy/src/flim-omero.py
|
5
|
31427
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
components/tools/OmeroPy/scripts/omero/analysis_scripts/FLIM.py
-----------------------------------------------------------------------------
Copyright (C) 2006-2010 University of Dundee. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
------------------------------------------------------------------------------
This script performs FRET analysis on FLIM images calculating Decay curve differences
between control and experimental datasets.
@author Pieta Schofield
<a href="mailto:[email protected]">[email protected]</a>
@author Donald MacDonald
<a href="mailto:[email protected]">[email protected]</a>
@version 3.0
<small>
(<b>Internal version:</b> $Revision: $Date: $)
</small>
@since 3.0-Beta4.1
"""
from mpi4py import MPI as mpi
import warnings as wrn
import matplotlib as mpl
mpl.use('Agg')
import time as tim
import resource as res
import sys as sys
import getopt as gop
import os as os
import itertools as itr
import numpy as npy
from matplotlib import pylab as plt
from matplotlib import colors as col
from scipy import optimize as opz
from scipy import ndimage as ndi
from scipy import cluster as clt
import mpfit as mpf
# OMERO Imports
import omero.clients
import omero.util.pixelstypetopython as pixelstypetopython
from omero.rtypes import *
import omero_ext.uuid as uuid # see ticket:3774
# Script Utility helper methods.
import omero.util.script_utils as script_utils
# Image saving and loading Imports
from struct import *
NAMESPACE = omero.constants.analysis.flim.NSFLIM;
try:
from PIL import Image, ImageDraw # see ticket:2597
except ImportError:
import Image, ImageDraw # see ticket:2597
# eps (epsilon) is a magic small number to avoid getting division by zero warnings from the mpfit calls
eps=0.000000001
# mycmdata1 and cycm1 are custom colormaps for generating matplotlib plots of the numpy arrays
# of the spatial parameter distributions (the colours are chosen to make nice inverted images for
# presentations rather than to look nice in their uninverted form).
mycmdata1 = {'red' : ((0., 1., 1.), (0.75, 0.5, 0.5), (1., 0.25, 0.)),
'green': ((0., 1., 1.), (0.5, 0.5, 0.5), (1., 0., 0.)),
'blue' : ((0., 1., 1.), (0.25, 0.5, 0.5), (1., 0.25, 0.))}
cycm1 = col.LinearSegmentedColormap('mycm', mycmdata1)
# strings for writing parameter names to the tex file
paranames=['$\\alpha_{1}$','$\\tau_{1}$','$b$','$\\alpha_{2}$','$\\tau_{2}$']
# A function to locate the index of the maximum value in an array
argmax = lambda a: max(itr.izip(a, xrange(len(a))))[1]
# The following functions are the single exponential and the double exponential functions
# and the wrapper functions for the mpfit program.
# Single exponential decay function convolved with irf
cexp_1 = lambda p, x, i: npy.convolve(p[0]*npy.e**(-x/(p[1]+eps))+p[2], i[::-1], mode=2)
# Single exponential decay function wrapped for calling by mpfit
def err_1(p,fjac=None,x=None,y=None,err=None,i=None,sh=None,sm=None,lf=None):
err=None
model = cexp_1(p,x,i)
status = 0
if err==None:
return [status, (y[sm:lf]-model[sm+sh:sh+lf])]
else:
return [status, (y[sm:lf]-model[sm+sh:sh+lf])/err[sm:lf]]
# Double exponential decay function convolved with irf
cexp_2 = lambda p, x, i: npy.convolve(p[0]*npy.e**(-x/(p[1]+eps))+p[3]*npy.e**(-x/(p[4]+eps))+p[2], i[::-1], mode=2)
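# Editor's note (a clarifying sketch using the parameter layout of the code
# above, where p = [\alpha_1, \tau_1, b, \alpha_2, \tau_2]): the fitted models are
#
#     F_1(t) = \alpha_1 e^{-t/\tau_1} + b                              (no-FRET control)
#     F_2(t) = \alpha_1 e^{-t/\tau_1} + \alpha_2 e^{-t/\tau_2} + b     (FRET condition)
#
# each convolved with the instrument response function (irf) before being
# compared with the measured decay.  Per pixel, the script later reports an
# apparent FRET efficiency E = 1 - \tau_1/\tau_2, with \tau_2 held fixed at the
# mean no-FRET lifetime estimated from the control group.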
# Double exponential decay function wrapped for calling by mpfit
def err_2(p,fjac=None,x=None,y=None,err=None,i=None,sh=None,sm=None,lf=None):
err=None
model = cexp_2(p,x,i)
status = 0
if err==None:
return [status, (y[sm:lf]-model[sm+sh:sh+lf])]
else:
return [status, (y[sm:lf]-model[sm+sh:sh+lf])/err[sm:lf]]
# This function fits a numpy array s representing the signal and e representing the variance against a model
# given by mode over time points given by a_tp. It returns a large number of values so that the global average
# fit can be plotted and incorporated into the TeX report
def procFitIt(s,e,a_tp,mode,d_args,par=None):
# Setup arrays to pass to fit function
i_slice=len(a_tp)
# create 2n long arrays for convolution of model with instrument response function
sig=npy.zeros(i_slice*2)
err=npy.ones(i_slice*2)
se=npy.zeros(i_slice*2)
tp=npy.zeros(i_slice*2)
irf=npy.zeros(i_slice*2)
# fill in the values from the signal and time points from irf
sig[0:i_slice]=s[0:i_slice]
tp[0:i_slice]=a_tp[0:i_slice,0]
err[0:i_slice]=e[0:i_slice]
irf[int(i_slice*0.5):int(i_slice*1.5)] = a_tp[0:i_slice,1]
# Fill in the missing time points
textra=npy.array(range(i_slice),npy.float)
textra+=1.0
textra*=float(tp[2]-tp[1])
textra+=tp[i_slice-1]
tp[i_slice:]=textra
# Select which model to fit
if mode>1:
# fit double exponential (FRET condition fit); if there are no initial parameters, generate them
# THESE ARE MAGIC NUMBER DEFAULTS FOR INITIAL VALUES
# At this point for average initial fit calculate the shift to align the peak of the convolved model signal
# with the peak of the actual signal
if par==None:
par = npy.array([1.0, d_args['n']/2.0, 0.0, 1.0, d_args['n']],'d')
cf=cexp_2(par,tp,irf)
smax=argmax(sig)
cmax=argmax(cf)
shift=cmax-smax
else:
smax=par[-2]
shift=par[-1]
# set up parameters and constraints for mpfit
params = [{'value':par[0], 'fixed':0, 'limited':[1,0], 'limits':[0.,0.]},
{'value':par[1], 'fixed':0, 'limited':[1,0], 'limits':[0.,0.]},
{'value':par[2], 'fixed':0, 'limited':[1,0], 'limits':[0.,0.]},
{'value':par[3], 'fixed':0, 'limited':[1,0], 'limits':[0.,0.]},
{'value':par[4], 'fixed':1, 'limited':[1,0], 'limits':[0.,0.]}]
# create dictionary of values to send to fit function
fa={'x':tp, 'y':sig, 'i':irf, 'sh':shift, 'sm':smax,'err':se,'lf':i_slice}
# DO THE FIT
fit = mpf.mpfit(err_2,parinfo=params,functkw=fa,quiet=True)
cf1=cexp_2(fit.params,tp,irf)
else:
# Single exponential (NO FRET) fit
# THESE ARE MAGIC NUMBER DEFAULTS FOR INITIAL VALUES
# At this point for average initial fit calculate the shift to align the peak of the convolved model signal
# with the peak of the actual signal
if par==None:
par = npy.array([1.0, d_args['n'], 0.0],'d')
cf=cexp_1(par,tp,irf)
smax=argmax(sig)
cmax=argmax(cf)
shift=cmax-smax
else:
smax=par[-2]
shift=par[-1]
# set up parameters and constraints for mpfit
params = [{'value':par[0], 'fixed':0, 'limited':[1,0], 'limits':[0.,0.]},
{'value':par[1], 'fixed':0, 'limited':[1,0], 'limits':[0.,0.]},
{'value':par[2], 'fixed':0, 'limited':[1,0], 'limits':[0.,0.]}]
# create dictionary of values to send to fit function
fa={'x':tp, 'y':sig, 'i':irf, 'sh':shift, 'sm':smax,'err':err,'lf':i_slice}
# DO THE FIT
fit = mpf.mpfit(err_1,parinfo=params,functkw=fa,quiet=True)
cf1=cexp_1(fit.params,tp,irf)
# create fitted parameter array to pass back
# Here a lot of values are returned so that higher level calls can lot the fit
fpar=[]
for i in range(len(fit.params)):
fpar.append(fit.params[i])
fpar.append(smax)
fpar.append(shift)
return [fit,tp[0:i_slice],cf1[shift:shift+i_slice],sig[0:i_slice],se[0:i_slice],npy.array(fpar)]
# This routine performs a k-means segmentation of the average projection of the data to identify the focal pixels.
# It is passed the whole file as rawdata and the parameter dictionary so it can know the structure of the data
# (spatial dimensions, time points).
# It also makes an initial fit of the globally averaged data for focal pixels and hence needs to know the model to use.
def procSEG(rawdata,a_tp,d_args):
i_res=d_args['s']
i_slice=d_args['t']
i_mode=d_args['m']
shape=(i_slice,i_res*i_res)
ndata=npy.zeros(i_res*i_res*i_slice,dtype=npy.float).reshape(shape)
# find cell boundary use stdev and average projections for K-means thresholding on normalized image stacks
nave=clt.vq.whiten(npy.average(rawdata,axis=0))
tabck,tacel=npy.sort(clt.vq.kmeans(nave,2)[0])
th1=1.75*tabck+tacel*0.25
th2=1.25*tabck+tacel*0.75
# Whole cell Thresholding
obck=npy.where(nave<th1,1,0)
ocel=npy.where(nave<th1,0,1)
ncel=len(ocel)
# At this point it would be possible to segment the images further with calls to scipy watershedding and labeling
# to generate separate regions of interest for each cell within the image. However this would have a knock-on
# effect on the distribution of pixels in MPI. Also watershedding might need manual tuning to prevent shattering
# cells.
# Calculate average focal pixel intensities and variance
zabck=npy.zeros(i_slice,dtype=npy.float)
s1bck=npy.zeros(i_slice,dtype=npy.float)
for i in range(i_slice):
s1bck[i]=ndi.variance(rawdata[i],obck)
ndata[i]=rawdata[i]-ndi.mean(rawdata[i],obck)
#
# Cell
#
zacel=npy.zeros(i_slice,dtype=npy.float)
s1cel=npy.zeros(i_slice,dtype=npy.float)
for i in range(i_slice):
zacel[i]=ndi.mean(ndata[i],ocel)
s1cel[i]=npy.sqrt(ndi.variance(ndata[i],ocel)+s1bck[i])
# initialize signal, error and time points
# Fit cell average to get ball park figures
initfit,tp,cf1,sig,se,fpar=procFitIt(zacel,s1cel,a_tp,i_mode,d_args,None)
return [ocel.reshape((i_res,i_res)),initfit,tp,cf1,sig,se,fpar]
# This function reads in the whole file of pixels for each node in the MPI cluster and returns a numpy array
def procRead(rawPixelsStore, pixels):
id = pixels.getId().getValue();
sizeC = pixels.getSizeC().getValue();
sizeX = pixels.getSizeX().getValue();
sizeY = pixels.getSizeY().getValue();
imageCXY = script_utils.readFlimImageFile(rawPixelsStore, pixels);
imageCD = imageCXY.reshape(sizeC,sizeX*sizeY);
return imageCD
#fd=open(d_args['f']+'.raw','rb')
#shape=(d_args['t'],d_args['s']*d_args['s'])
#rawdata = npy.fromfile(file=fd, dtype=npy.uint16).byteswap().reshape(shape)
#return rawdata
# This function reads in the irf file and returns its contents as an array
def procTP(rawFileStore, queryService, instrumentResponseFileId):
return script_utils.readFileAsArray(rawFileStore, queryService, instrumentResponseFileId, 256, 2, separator = ' ');
def createSession(host, user, password):
client = omero.client(host);
session = client.createSession(user, password);
return client, session;
def pixelsInDataset(containerService, datasetId):
Images = containerService.getImages('DatasetI',[datasetId],None,None)
pixels = [];
for image in Images:
pixels.append(image.getPixels(0));
return pixels;
def imagesInDataset(containerService, datasetId):
images = containerService.getImages('DatasetI',[datasetId],None,None)
return images;
# This is the main mpi script
def getParentProject(containerService, datasetId):
projectList = containerService.findContainerHierarchies('Project',[datasetId],None,None);
return projectList[0];
def getDataset(containerService, datasetId):
datasetList = containerService.loadContainerHierarchy('Dataset',[datasetId],None,None)
return datasetList[0];
def getProject(containerService, projectId):
projectList = containerService.loadContainerHierarchy('Project',[projectId],None,None)
return projectList[0];
def getImage(gateway, imageId):
return gateway.getImage(imageId)
def attachFileToDataset(containerService, queryService, updateService, rawFileStore, datasetID, localName):
dataset = getDataset(containerService, datasetID);
return script_utils.uploadAndAttachFile(queryService, updateService, rawFileStore, dataset, localName, 'PDF', description=None, namespace=None, origFilePathName=None);
def attachFileToProject(containerService, queryService, updateService, rawFileStore, projectId, localName):
project = getProject(containerService, projectId);
return script_utils.uploadAndAttachFile(queryService, updateService, rawFileStore, project, localName, 'PDF', description=None, namespace=None, origFilePathName=None);
def attachFileToImage(gateway, queryService, updateService, rawFileStore, imageID, localName):
image = getImage(gateway, imageID);
return script_utils.uploadAndAttachFile(queryService, updateService, rawFileStore, image, localName, 'CSV', description=None, namespace=NAMESPACE, origFilePathName=None);
def getImageIdFromPixels(gateway, pixelId):
return gateway.getPixels(pixelsId).getImage().getId().getValue();
def mpi_run(argv):
texfilename = 'test_n.tex';
# set up the MPI world and recover the number of nodes and the rank of each process.
# The process of rank=0 acts as the co-ordinating process and does the LaTeX generation
comm = mpi.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
name= mpi.Get_processor_name()
# get the command line arguments
try:
opts, args = gop.getopt(argv, "h:u:p:c:e:r:o", ["host","user","password","control","experiment", "rfid", "outputfilename"])
except gop.error, msg:
print >>sys.stderr, msg
return 2
s_dir = str(uuid.uuid1())+'/';
d_args={}
d_args['d'] = s_dir;
for opt, arg in opts:
if opt in ("-h", "--host"):
host=arg.strip()
d_args['host']=host
if opt in ("-u", "--user"):
user=arg.strip()
d_args['user']=user
if opt in ("-p", "--password"):
password=arg.strip()
d_args['password']=password
if opt in ("-c", "--control"):
control=arg.strip()
d_args['control']=control
if opt in ("-e", "--experiment"):
experiment=arg.strip()
d_args['experiment']=experiment
if opt in ("-r","--rfid"):
rfid=arg.strip()
d_args['rfid']=rfid
#a_tp=procTP(arg.strip())
#i_slice=len(a_tp)
#d_args['t']=i_slice
#if opt in ("-r","--resolution"):
# i_sres=int(arg.strip())
# d_args['s']=i_sres
client, session = createSession(d_args['host'], d_args['user'], d_args['password']);
updateService = session.getUpdateService();
queryService = session.getQueryService();
rawFileStore = session.createRawFileStore();
rawPixelsStore = session.createRawPixelsStore();
iPixels = session.getPixelsService();
gateway = session.createGateway();
containerService = session.getContainerService();
a_rawfiles=[]
# for the NO FRET control and FRET positive subdirectories get the names of all the files
#a_dir=[s_dir+'negative/',s_dir+'positive/']
#for i in range(2):
# a_rawfiles.append([])
# d_flist=os.listdir(a_dir[i])
# for j in range(len(d_flist)):
# s_filestr = d_flist[j]
# s_fsplit= s_filestr.split('.')
# if len(s_fsplit)>1:
# if s_fsplit[1]=="raw" :
# a_rawfiles[i].append(a_dir[i]+s_fsplit[0])
a_rawfiles.append(imagesInDataset(containerService, int(d_args['control'])));
a_rawfiles.append(imagesInDataset(containerService, int(d_args['experiment'])));
a_tp = procTP(rawFileStore, queryService, int(d_args['rfid']));
i_sres = a_rawfiles[0][0].getPixels(0).getSizeX().getValue();
d_args['s'] = i_sres;
d_args['t'] = a_rawfiles[0][0].getPixels(0).getSizeC().getValue();
i_slice = d_args['t'];
# Some output stuff to generate latex doc
if rank==0:
os.system('mkdir ' + s_dir);
os.system('mkdir ' + s_dir+'/out');
tex=open(s_dir+texfilename,'w')
tex.write('\\documentclass{/usr/local/share/texmf/proposal} \n')
tex.write('\\usepackage{graphics} \n')
tex.write('\\usepackage{rotating} \n')
tex.write('\\usepackage{ifpdf} \n')
tex.write('\\setlength{\\oddsidemargin}{-1cm} \n')
tex.write('\\setlength{\\topmargin}{-1cm} \n')
tex.write('\\setlength{\\textwidth}{17cm} \n')
tex.write('\\running{FLIM-mpi Results} \n')
tex.write('\\runningsub{} \n')
tex.write('\\begin{document} \n')
tex.write('\\section*{No FRET Files} \n')
# The serious stuff starts here
done=False
group=0
fcount=0
cummean=0
d_args['n']=1.0
totalfitcalls=0
# Loop until all files in both directories are processed
while not done:
# make up a name for the output files for the particular file being processed;
# this could use the input file name, but this is a shorter, consistent form
s_id=a_rawfiles[group][fcount].getId().getValue();
# set up meta parameter dictionary that contains useful parameters for subroutines
# m model type 1 or 2
# f file name
d_args['i']=s_id
i_mode=group+1
d_args['m']=i_mode
d_args['p']=0
d_args['f']=a_rawfiles[group][fcount].getPixels(0);
# every process gets a copy of the whole rawdata for the current file at this point;
# this would be better if only the process of rank 0 did this and then, once pixels have been assigned and
# scattered to each process, they got just the pixels they need, but this works for now
localraw=procRead(rawPixelsStore, d_args['f'])
localpix=[]
# This is where process rank=0 segments the image and performs the global average fit,
# then generates pixel lists that are to be scattered to each of
# the processes so each process knows which pixels it is working on
if rank==0:
tex.write("\\verb=%s= \\\\ \n"%d_args['i'])
start=mpi.Wtime()
celpix, initfit,tp,cf1,sig,se,ipar=procSEG(localraw,a_tp,d_args)
tex.write("Status: %d Chisq: %10.5f \\\\ \n"%(initfit.status,initfit.fnorm))
for i in range(len(ipar)-2):
tex.write('%s=%10.6f \\\\ \n'%(paranames[i],ipar[i]))
fig=plt.figure()
ax=fig.add_subplot(111)
ax.errorbar(tp,sig,yerr=se,fmt='go')
ax.plot(tp,cf1,'b-')
fig.savefig(d_args['d']+'out/fit_'+str(d_args['i'])+'.pdf')
pixarray=npy.transpose(npy.nonzero(celpix))
for r in range(size):
pix=[ r + size*i for i in range(int(float(len(pixarray))/size)+1) if r + size*i < len(pixarray) ]
localpix.append([])
for p in pix:
localpix[r].append(pixarray[p])
else:
ipar=None
# scatter list of pixels for processing to each node
localpixels=comm.scatter(localpix,root=0)
# broadcast to all process the parameters from the initial averaged fit
localparams=comm.bcast(ipar,root=0)
# Make the local grids for the processes to fill in the parameter values they calculate
# Again here each process has a full-size grid but only fills in the pixels it has been assigned
g=npy.zeros((i_sres,i_sres),npy.float)
a1=npy.zeros((i_sres,i_sres),npy.float)
k1=npy.zeros((i_sres,i_sres),npy.float)
f=npy.zeros((i_sres,i_sres),npy.float)
b=npy.zeros((i_sres,i_sres),npy.float)
c=npy.zeros((i_sres,i_sres),npy.float)
if i_mode>1:
a2=npy.zeros((i_sres,i_sres),npy.float)
k2=npy.zeros((i_sres,i_sres),npy.float)
# for each pixel the process has been assigned, do the fit
for idx in localpixels:
# PUT FITTING CALL HERE: only fit non-edge pixels
if idx[0]>0 and idx[0]<i_sres and idx[1]>0 and idx[1]<i_sres:
s=localraw.reshape((i_slice,i_sres,i_sres))[:,idx[0]-1:idx[0]+1,idx[1]-1:idx[1]+1].mean(axis=1).mean(axis=1)
e=localraw.reshape((i_slice,i_sres,i_sres))[:,idx[0]-1:idx[0]+1,idx[1]-1:idx[1]+1].std(axis=1).std(axis=1)
e=npy.zeros(len(s))+1
fit,tp,cf1,sig,se,fpar=procFitIt(s,e,a_tp,i_mode,d_args,localparams)
totalfitcalls+=1
c[idx[0],idx[1]]=fit.fnorm
g[idx[0],idx[1]]=fit.status
if i_mode >1:
a1[idx[0],idx[1]]=fit.params[0]/(fit.params[0]+fit.params[3])
k1[idx[0],idx[1]]=fit.params[1]
b[idx[0],idx[1]]=fit.params[2]
a2[idx[0],idx[1]]=fit.params[3]/(fit.params[0]+fit.params[3])
k2[idx[0],idx[1]]=fit.params[4]
f[idx[0],idx[1]]=(1.0-k1[idx[0],idx[1]]/k2[idx[0],idx[1]])
else:
a1[idx[0],idx[1]]=fit.params[0]
k1[idx[0],idx[1]]=fit.params[1]
b[idx[0],idx[1]]=fit.params[2]
# Gather the results from all nodes
rg=comm.gather(g,root=0)
ra1=comm.gather(a1,root=0)
rk1=comm.gather(k1,root=0)
rb=comm.gather(b,root=0)
rc=comm.gather(c,root=0)
rf=comm.gather(f,root=0)
if i_mode>1:
ra2=comm.gather(a2,root=0)
rk2=comm.gather(k2,root=0)
# Once process rank=0 has gathered all the grids of pixels from the other processes collate them
# to produce one grid with all the values in
if rank == 0:
gg=npy.zeros((i_sres,i_sres),npy.float)
ga1=npy.zeros((i_sres,i_sres),npy.float)
gk1=npy.zeros((i_sres,i_sres),npy.float)
gf=npy.zeros((i_sres,i_sres),npy.float)
gb=npy.zeros((i_sres,i_sres),npy.float)
gc=npy.zeros((i_sres,i_sres),npy.float)
if i_mode>1:
ga2=npy.zeros((i_sres,i_sres),npy.float)
gk2=npy.zeros((i_sres,i_sres),npy.float)
for i in range(len(rg)):
gg+=npy.array(rg[i])
ga1+=npy.array(ra1[i])
gk1+=npy.array(rk1[i])
gf+=npy.array(rf[i])
gb+=npy.array(rb[i])
gc+=npy.array(rc[i])
if i_mode>1:
ga2+=npy.array(ra2[i])
gk2+=npy.array(rk2[i])
# AT THIS POINT NEED TO CALCULATE DISTRIBUTION AND SELECT NO-FRET LIFE TIME FOR GROUP1
# Calculate no fret decay from pixel-by-pixel fit histogram for k1
# generate histogram magic number 1000 bins
k1h=npy.histogram(gk1[npy.where(gk1>0)], bins=1000)
# find midpoint of bins
mp=0.5*(k1h[1][0:-1]+k1h[1][1:])
# get mode
md=argmax(k1h[0][1:])
# calc cumulative sum for lower half and find the 50th centile for this range
csl=100*k1h[0][1:md].cumsum()/k1h[0][1:md].sum()
csmn=100-(csl-50)*(csl-50)
csmin=argmax(csmn)
# calc cumulative sum for upper half and find the 50th centile for this range
csu=100*k1h[0][md:].cumsum()/k1h[0][md:].sum()
csmx=100-(csu-50)*(csu-50)
csmax=argmax(csmx)
# calculate the mean of this new range
nmean=(k1h[0][csmin:csmax+md]*mp[csmin:csmax+md]).sum()/k1h[0][csmin:csmax+md].sum()
tex.write('Mean %10.5f\\\\ \n'%nmean)
end=mpi.Wtime()
tex.write('Time %9.5f\\\\ \n'%((end-start)))
cummean+=nmean
# THIS BIT IS WHERE PROCESS RANK=0 generates output for use in the tex document
# output pictures
fig=plt.figure()
n,bins,patches=plt.hist(gk1[npy.where(gk1>0)],1000,normed=1,histtype='bar')
fig.savefig(s_dir+'out/gk1h_'+str(s_id)+'.pdf')
fig.clf()
if i_mode>1:
fig=plt.figure()
n,bins,patches=plt.hist(gk2[npy.where(gk2>0)],1000,normed=1,histtype='bar')
fig.savefig(s_dir+'out/gk2h_'+str(s_id)+'.pdf')
fig.clf()
fig=plt.figure()
plt.matshow(ga1,False,cmap=cycm1)
fig.savefig(s_dir+'out/ga1_'+str(s_id)+'.png')
fig.clf()
fig=plt.figure()
plt.matshow(gk1,False,cmap=cycm1)
fig.savefig(s_dir+'out/gk1_'+str(s_id)+'.png')
fig.clf()
fig=plt.figure()
plt.matshow(gb,False,cmap=cycm1)
fig.savefig(s_dir+'out/gb_'+str(s_id)+'.png')
fig.clf()
fig=plt.figure()
plt.matshow(gc,False,cmap=cycm1)
fig.savefig(s_dir+'out/gc_'+str(s_id)+'.png')
fig.clf()
fig=plt.figure()
plt.matshow(gf,False,cmap=cycm1)
fig.savefig(s_dir+'out/gf_'+str(s_id)+'.png')
fig.clf()
fig=plt.figure()
plt.matshow(gg,False,cmap=cycm1)
fig.savefig(s_dir+'out/gg_'+str(s_id)+'.png')
fig.clf()
# This is where process rank=0 saves the csv files with the parameters
npy.savetxt(s_dir+'out/gg_'+str(s_id)+'.txt',gg,fmt='%10.5f',delimiter=',')
npy.savetxt(s_dir+'out/ga1_'+str(s_id)+'.txt',ga1,fmt='%10.5f',delimiter=',')
npy.savetxt(s_dir+'out/gk1_'+str(s_id)+'.txt',gk1,fmt='%10.5f',delimiter=',')
npy.savetxt(s_dir+'out/gf_'+str(s_id)+'.txt',gf,fmt='%10.5f',delimiter=',')
npy.savetxt(s_dir+'out/gb_'+str(s_id)+'.txt',gb,fmt='%10.5f',delimiter=',')
npy.savetxt(s_dir+'out/gc_'+str(s_id)+'.txt',gc,fmt='%10.5f',delimiter=',')
attachFileToImage(gateway, queryService, updateService, rawFileStore, s_id, s_dir+'out/gg_'+str(s_id)+'.txt');
attachFileToImage(gateway, queryService, updateService, rawFileStore, s_id, s_dir+'out/ga1_'+str(s_id)+'.txt');
attachFileToImage(gateway, queryService, updateService, rawFileStore, s_id, s_dir+'out/gk1_'+str(s_id)+'.txt');
attachFileToImage(gateway, queryService, updateService, rawFileStore, s_id, s_dir+'out/gf_'+str(s_id)+'.txt');
attachFileToImage(gateway, queryService, updateService, rawFileStore, s_id, s_dir+'out/gb_'+str(s_id)+'.txt');
attachFileToImage(gateway, queryService, updateService, rawFileStore, s_id, s_dir+'out/gc_'+str(s_id)+'.txt');
if i_mode >1:
fig=plt.figure()
plt.matshow(ga2,False,cmap=cycm1)
fig.savefig(s_dir+'out/ga2_'+str(s_id)+'.png')
fig.clf()
fig=plt.figure()
plt.matshow(gk2,False,cmap=cycm1)
fig.savefig(s_dir+'out/gk2_'+str(s_id)+'.png')
fig.clf()
npy.savetxt(s_dir+'out/ga2_'+str(s_id)+'.txt',ga2,fmt='%10.5f',delimiter=',')
npy.savetxt(s_dir+'out/gk2_'+str(s_id)+'.txt',gk2,fmt='%10.5f',delimiter=',')
attachFileToImage(gateway, queryService, updateService, rawFileStore, s_id, s_dir+'out/ga2_'+str(s_id)+'.txt');
attachFileToImage(gateway, queryService, updateService, rawFileStore, s_id, s_dir+'out/gk2_'+str(s_id)+'.txt');
# all stuff for generating the tex document
tex.write("\\begin{figure}[h!] \n")
tex.write("\\centering \n")
tex.write("\\begin{tabular}{cc} \n")
tex.write("\\includegraphics[scale=0.3]{%sout/gk1h_%s.pdf}& \n"%(s_dir,str(s_id)))
tex.write("\\includegraphics[scale=0.3]{%sout/fit_%s.pdf}\\\\ \n"%(s_dir,str(s_id)))
tex.write('$tau_{1}$ distribution& Global fit\\\\')
tex.write("\\end{tabular} \n")
tex.write("\\end{figure} \n")
tex.write("\\begin{figure}[h!] \n")
tex.write("\\centering \n")
tex.write("\\begin{tabular}{ccc} \n")
tex.write("\\includegraphics[scale=0.2]{%sout/ga1_%s.png}& \n"%(s_dir,str(s_id)))
tex.write("\\includegraphics[scale=0.2]{%sout/gk1_%s.png}& \n"%(s_dir,str(s_id)))
tex.write("\\includegraphics[scale=0.2]{%sout/gb_%s.png}\\\\ \n"%(s_dir,str(s_id)))
tex.write('$\\alpha_{1}$&$\\tau_{1}$&$b$\\\\ \n')
if i_mode>1:
tex.write("\\includegraphics[scale=0.2]{%sout/ga2_%s.png}& \n"%(s_dir,str(s_id)))
tex.write("\\includegraphics[scale=0.2]{%sout/gk2_%s.png}& \n"%(s_dir,str(s_id)))
tex.write("\\includegraphics[scale=0.2]{%sout/gf_%s.png}\\\\ \n"%(s_dir,str(s_id)))
tex.write('$\\alpha_{2}$&$\\tau_{2}$&$E_{f}$\\\\ \n')
tex.write("\\includegraphics[scale=0.2]{%sout/gc_%s.png}& \n"%(s_dir,str(s_id)))
tex.write("\\includegraphics[scale=0.2]{%sout/gg_%s.png}\\\\ \n"%(s_dir,str(s_id)))
tex.write('$\\chi^{2}$&$fit$\\\\ \n')
tex.write("\\end{tabular} \n")
tex.write("\\end{figure} \n")
ncalls=npy.asarray(0,'i')
# This line syncs the processes so they wait for process rank=0 to do its collating and output before getting next file
comm.Allreduce([npy.asarray(totalfitcalls,'i'),mpi.INT], [ncalls,mpi.INT],op=mpi.SUM)
if rank==0:
tex.write('Total pixels fitted=%d\\\\ \n'%(ncalls))
tex.flush()
fcount+=1
# Here the cumulative mean no-FRET lifetime is calculated and the group is changed once all the no-FRET files have been processed
if fcount>=len(a_rawfiles[group]):
group+=1
if group>1:
if rank==0:
cummean/=float(fcount)
tex.write('Cumulative mean fret %10.5f \\\\ \n'%cummean)
d_args['n']=cummean
cummean=0
done=True
else:
if rank==0:
cummean/=float(fcount)
tex.write('Cumulative mean no-fret %10.5f \\\\ \n'%cummean)
tex.write('\\section*{FRET conditions} \n')
d_args['n']=cummean
cummean=0
fcount=0
if rank==0:
tex.write('\\newpage\n')
tex.write("\\noindent")
if rank==0:
tex.write('\\end{document}\n')
tex.close()
os.system('pdflatex -output-directory='+s_dir+' '+s_dir+texfilename);
pdfFile = texfilename.rsplit('.tex')[0]+'.pdf';
attachFileToProject(containerService, queryService, updateService, rawFileStore, getParentProject(containerService, int(d_args['experiment'])).getId().getValue(), s_dir+pdfFile);
#-------------------------
if __name__ == "__main__":
sys.exit(mpi_run(sys.argv[1:]))
|
gpl-2.0
|
costypetrisor/scikit-learn
|
sklearn/linear_model/tests/test_sparse_coordinate_descent.py
|
244
|
9986
|
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
# Check that the sparse_coef property works
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert_true(sp.isspmatrix(clf.sparse_coef_))
assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_)
def test_normalize_option():
# Check that the normalize option in enet works
X = sp.csc_matrix([[-1], [0], [1]])
y = [-1, 0, 1]
clf_dense = ElasticNet(fit_intercept=True, normalize=True)
clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
clf_dense.fit(X, y)
X = sp.csc_matrix(X)
clf_sparse.fit(X, y)
assert_almost_equal(clf_dense.dual_gap_, 0)
assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
# Check that the sparse lasso can handle zero data without crashing
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
# Test ElasticNet for various values of alpha and l1_ratio with list X
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
# catch warning about alpha=0.
# this is discouraged but should work.
ignore_warnings(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
# Test ElasticNet for various values of alpha and l1_ratio with sparse X
f = ignore_warnings
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
# this should be the same as lasso
clf = ElasticNet(alpha=0, l1_ratio=1.0)
f(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
positive=False, n_targets=1):
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0 # only the top features are impacting the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative,
positive=positive)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
# check that the coefs are sparse
assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
# XXX: There is a bug when precompute is not None!
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_,
estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, fit_intercept=False)
ignore_warnings(clf.fit)(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(n_alphas, clf.n_alphas)
assert_equal(n_alphas, len(clf.alphas_))
sparse_mse_path = clf.mse_path_
ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data
assert_almost_equal(clf.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
X, y = make_sparse_data(n_samples=40, n_features=10)
for normalize in [True, False]:
clfs = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
clfs = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
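# A minimal, self-contained sketch (not part of the original test suite)
# illustrating the behaviour the tests above assert: fitting ElasticNet on a
# scipy.sparse matrix should give (nearly) the same coefficients as fitting on
# the equivalent dense array. All data below is synthetic.
def example_sparse_dense_elastic_net_agreement():
    import numpy as np
    import scipy.sparse as sparse
    from sklearn.linear_model import ElasticNet

    rng = np.random.RandomState(0)
    X_dense = rng.randn(50, 20)
    X_dense[rng.uniform(size=X_dense.shape) > 0.5] = 0.0  # sparsify the inputs
    y = X_dense[:, 0] - 2 * X_dense[:, 1] + 0.01 * rng.randn(50)

    params = dict(alpha=0.1, l1_ratio=0.8, tol=1e-7, max_iter=10000)
    clf_sparse = ElasticNet(**params).fit(sparse.csc_matrix(X_dense), y)
    clf_dense = ElasticNet(**params).fit(X_dense, y)
    # both code paths converge to (nearly) the same solution
    assert np.allclose(clf_sparse.coef_, clf_dense.coef_, atol=1e-4)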
|
bsd-3-clause
|
pastas/pastas
|
pastas/stressmodels.py
|
1
|
49209
|
"""This module contains all the stress models available in Pastas.
Stress models are used to translate an input time series into a
contribution that explains (part of) the output series.
Examples
--------
>>> sm = ps.StressModel(stress, rfunc=ps.Gamma, name="sm1")
>>> ml.add_stressmodel(stressmodel=sm)
See Also
--------
pastas.model.Model.add_stressmodel
"""
from logging import getLogger
import numpy as np
from pandas import date_range, Series, Timedelta, DataFrame, concat, Timestamp
from scipy.signal import fftconvolve
from .decorators import set_parameter, njit
from .recharge import Linear
from .rfunc import One, Exponential, HantushWellModel
from .timeseries import TimeSeries
from .utils import validate_name, check_numba
logger = getLogger(__name__)
__all__ = ["StressModel", "StressModel2", "Constant", "StepModel",
"LinearTrend", "RechargeModel", "WellModel", "TarsoModel"]
class StressModelBase:
"""StressModel Base class called by each StressModel object.
Attributes
----------
name: str
Name of this stressmodel object. Used as prefix for the parameters.
parameters: pandas.DataFrame
Dataframe containing the parameters.
"""
_name = "StressModelBase"
def __init__(self, name, tmin, tmax, rfunc=None):
self.name = validate_name(name)
self.tmin = tmin
self.tmax = tmax
self.freq = None
self.rfunc = rfunc
self.parameters = DataFrame(
columns=['initial', 'pmin', 'pmax', 'vary', 'name'])
self.stress = []
@property
def nparam(self):
return self.parameters.index.size
def set_init_parameters(self):
"""Set the initial parameters (back) to their default values."""
@set_parameter
def _set_initial(self, name, value):
"""Internal method to set the initial parameter value.
Notes
-----
The preferred method for parameter setting is through the model.
"""
self.parameters.loc[name, 'initial'] = value
@set_parameter
def _set_pmin(self, name, value):
"""Internal method to set the lower bound of the parameter value.
Notes
-----
The preferred method for parameter setting is through the model.
"""
self.parameters.loc[name, 'pmin'] = value
@set_parameter
def _set_pmax(self, name, value):
"""Internal method to set the upper bound of the parameter value.
Notes
-----
The preferred method for parameter setting is through the model.
"""
self.parameters.loc[name, 'pmax'] = value
@set_parameter
def _set_vary(self, name, value):
"""Internal method to set if the parameter is varied during
optimization.
Notes
-----
The preferred method for parameter setting is through the model.
"""
self.parameters.loc[name, 'vary'] = bool(value)
def update_stress(self, **kwargs):
"""Method to update the settings of the individual TimeSeries.
Notes
-----
For the individual options for the different settings please refer to
the docstring from the TimeSeries.update_series() method.
See Also
--------
ps.timeseries.TimeSeries.update_series
"""
for stress in self.stress:
stress.update_series(**kwargs)
if "freq" in kwargs:
self.freq = kwargs["freq"]
def dump_stress(self, series=True):
"""Method to dump all stresses in the stresses list.
Parameters
----------
series: bool, optional
True if time series are to be exported, False if only the name
of the time series are needed. Settings are always exported.
Returns
-------
data: dict
dictionary with the dump of the stresses.
"""
data = []
for stress in self.stress:
data.append(stress.to_dict(series=series))
return data
def get_stress(self, p=None, tmin=None, tmax=None, freq=None,
istress=None, **kwargs):
"""Returns the stress or stresses of the time series object as a pandas
DataFrame.
If the time series object has multiple stresses each column
represents a stress.
Returns
-------
stress: pandas.Dataframe
Pandas dataframe of the stress(es)
"""
if tmin is None:
tmin = self.tmin
if tmax is None:
tmax = self.tmax
self.update_stress(tmin=tmin, tmax=tmax, freq=freq)
return self.stress[0].series
def to_dict(self, series=True):
"""Method to export the StressModel object.
Returns
-------
data: dict
dictionary with all necessary information to reconstruct the
StressModel object.
"""
data = {
"stressmodel": self._name,
"name": self.name,
"stress": self.dump_stress(series)
}
return data
def get_nsplit(self):
"""Determine in how many timeseries the contribution can be split."""
if hasattr(self, 'nsplit'):
return self.nsplit
else:
return len(self.stress)
def _get_block(self, p, dt, tmin, tmax):
"""Internal method to get the block-response function"""
if tmin is not None and tmax is not None:
day = Timedelta(1, 'D')
maxtmax = (Timestamp(tmax) - Timestamp(tmin)) / day
else:
maxtmax = None
b = self.rfunc.block(p, dt, maxtmax=maxtmax)
return b
class StressModel(StressModelBase):
"""Time series model consisting of the convolution of one stress with one
response function.
Parameters
----------
stress: pandas.Series
pandas Series object containing the stress.
rfunc: rfunc class
Response function used in the convolution with the stress.
name: str
Name of the stress.
up: bool or None, optional
True if response function is positive (default), False if negative.
None if you don't want to define if response is positive or negative.
cutoff: float, optional
float between 0 and 1 to determine how long the response is (default
        is 99.9% of the actual response time). Used to reduce computation times.
settings: dict or str, optional
The settings of the stress. This can be a string referring to a
predefined settings dict, or a dict with the settings to apply.
Refer to the docstring of pastas.Timeseries for further information.
metadata: dict, optional
dictionary containing metadata about the stress. This is passed onto
the TimeSeries object.
meanstress: float, optional
The mean stress determines the initial parameters of rfunc. The initial
parameters are chosen in such a way that the gain of meanstress is 1.
Examples
--------
>>> import pastas as ps
>>> import pandas as pd
>>> sm = ps.StressModel(stress=pd.Series(), rfunc=ps.Gamma, name="Prec",
>>> settings="prec")
See Also
--------
pastas.rfunc
pastas.timeseries.TimeSeries
"""
_name = "StressModel"
def __init__(self, stress, rfunc, name, up=True, cutoff=0.999,
settings=None, metadata=None, meanstress=None):
if isinstance(stress, list):
stress = stress[0] # TODO Temporary fix Raoul, 2017-10-24
stress = TimeSeries(stress, settings=settings, metadata=metadata)
if meanstress is None:
meanstress = stress.series.std()
rfunc = rfunc(up=up, cutoff=cutoff, meanstress=meanstress)
StressModelBase.__init__(self, name=name,
tmin=stress.series.index.min(),
tmax=stress.series.index.max(), rfunc=rfunc)
self.freq = stress.settings["freq"]
self.stress = [stress]
self.set_init_parameters()
def set_init_parameters(self):
"""Set the initial parameters (back) to their default values.
"""
self.parameters = self.rfunc.get_init_parameters(self.name)
def simulate(self, p, tmin=None, tmax=None, freq=None, dt=1.0):
"""Simulates the head contribution.
Parameters
----------
p: array_like
array_like object with the values as floats representing the
model parameters.
tmin: str, optional
tmax: str, optional
freq: str, optional
dt: int, optional
Returns
-------
pandas.Series
The simulated head contribution.
"""
self.update_stress(tmin=tmin, tmax=tmax, freq=freq)
b = self._get_block(p, dt, tmin, tmax)
stress = self.stress[0].series
npoints = stress.index.size
h = Series(data=fftconvolve(stress, b, 'full')[:npoints],
index=stress.index, name=self.name, fastpath=True)
return h
def to_dict(self, series=True):
"""Method to export the StressModel object.
Returns
-------
data: dict
dictionary with all necessary information to reconstruct the
StressModel object.
"""
data = {
"stressmodel": self._name,
"rfunc": self.rfunc._name,
"name": self.name,
"up": self.rfunc.up,
"cutoff": self.rfunc.cutoff,
"stress": self.dump_stress(series)
}
return data
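# Illustrative sketch (not part of Pastas) of the core mechanism used in
# StressModel.simulate above: the head contribution is the convolution of the
# stress with a block response, truncated to the length of the stress. The
# exponential block response below is only a stand-in for an rfunc object and
# its parameter values (gain 10, decay 25 days) are arbitrary assumptions.
def _example_stress_convolution():
    import numpy as np
    from scipy.signal import fftconvolve
    rng = np.random.RandomState(0)
    stress = rng.gamma(shape=1.0, scale=2.0, size=365)  # e.g. daily precipitation
    t = np.arange(1.0, 200.0)
    # block response = increments of the exponential step response S(t) = A * (1 - exp(-t / a))
    block = 10.0 * (np.exp(-(t - 1.0) / 25.0) - np.exp(-t / 25.0))
    contribution = fftconvolve(stress, block, "full")[:stress.size]
    return contribution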
class StressModel2(StressModelBase):
"""Time series model consisting of the convolution of two stresses with one
response function. The first stress causes the head to go up and the second
stress causes the head to go down.
Parameters
----------
stress: list of pandas.Series or list of pastas.timeseries
list of two pandas.Series or pastas.timeseries objects containing the
stresses. Usually the first is the precipitation and the second the
evaporation.
rfunc: pastas.rfunc instance
Response function used in the convolution with the stress.
name: str
Name of the stress
up: bool or None, optional
True if response function is positive (default), False if negative.
None if you don't want to define if response is positive or negative.
cutoff: float, optional
float between 0 and 1 to determine how long the response is (default
is 99.9% of the actual response time). Used to reduce computation
times.
    settings: tuple of dicts or strs, optional
        The settings of the individual stresses. Each can be a string
        referring to a predefined settings dict, or a dict with the settings
        to apply. Refer to the docstring of pastas.Timeseries for further
        information. Default is ("prec", "evap").
metadata: list of dicts, optional
dictionary containing metadata about the stress. This is passed onto
the TimeSeries object.
Notes
-----
    The order in which the stresses are provided is the order in which the
    metadata and settings dictionaries or strings are passed onto the
    TimeSeries objects. By default, the precipitation stress is the first
    and the evaporation stress the second stress.
See Also
--------
pastas.rfunc
pastas.timeseries
"""
_name = "StressModel2"
def __init__(self, stress, rfunc, name, up=True, cutoff=0.999,
settings=("prec", "evap"), metadata=(None, None),
meanstress=None):
# First check the series, then determine tmin and tmax
stress0 = TimeSeries(stress[0], settings=settings[0],
metadata=metadata[0])
stress1 = TimeSeries(stress[1], settings=settings[1],
metadata=metadata[1])
# Select indices from validated stress where both series are available.
index = stress0.series.index.intersection(stress1.series.index)
if index.empty:
msg = ('The two stresses that were provided have no '
'overlapping time indices. Please make sure the '
'indices of the time series overlap.')
logger.error(msg)
raise Exception(msg)
# First check the series, then determine tmin and tmax
stress0.update_series(tmin=index.min(), tmax=index.max())
stress1.update_series(tmin=index.min(), tmax=index.max())
if meanstress is None:
meanstress = (stress0.series - stress1.series).std()
rfunc = rfunc(up=up, cutoff=cutoff, meanstress=meanstress)
StressModelBase.__init__(self, name=name, tmin=index.min(),
tmax=index.max(), rfunc=rfunc)
self.stress.append(stress0)
self.stress.append(stress1)
self.freq = stress0.settings["freq"]
self.set_init_parameters()
def set_init_parameters(self):
"""Set the initial parameters back to their default values.
"""
self.parameters = self.rfunc.get_init_parameters(self.name)
self.parameters.loc[self.name + '_f'] = \
(-1.0, -2.0, 0.0, True, self.name)
def simulate(self, p, tmin=None, tmax=None, freq=None, dt=1, istress=None):
"""Simulates the head contribution.
Parameters
----------
p: array_like
array_like object with the values as floats representing the
model parameters.
tmin: str, optional
tmax: str, optional
freq: str, optional
dt: int, optional
istress: int, optional
Returns
-------
pandas.Series
The simulated head contribution.
"""
b = self._get_block(p[:-1], dt, tmin, tmax)
stress = self.get_stress(p=p, tmin=tmin, tmax=tmax, freq=freq,
istress=istress)
if istress == 1:
stress = p[-1] * stress
npoints = stress.index.size
h = Series(data=fftconvolve(stress, b, 'full')[:npoints],
index=stress.index, name=self.name, fastpath=True)
if istress is not None:
if self.stress[istress].name is not None:
h.name = h.name + ' (' + self.stress[istress].name + ')'
return h
def get_stress(self, p=None, tmin=None, tmax=None, freq=None,
istress=None, **kwargs):
if tmin is None:
tmin = self.tmin
if tmax is None:
tmax = self.tmax
self.update_stress(tmin=tmin, tmax=tmax, freq=freq)
if istress is None:
if p is None:
p = self.parameters.initial.values
return self.stress[0].series.add(p[-1] * self.stress[1].series)
elif istress == 0:
return self.stress[0].series
else:
return self.stress[1].series
def to_dict(self, series=True):
"""Method to export the StressModel object.
Returns
-------
data: dict
dictionary with all necessary information to reconstruct the
StressModel object.
"""
data = {
"stressmodel": self._name,
"rfunc": self.rfunc._name,
"name": self.name,
"up": self.rfunc.up,
"cutoff": self.rfunc.cutoff,
"stress": self.dump_stress(series)
}
return data
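# Brief sketch (not Pastas code) of how StressModel2.get_stress above combines
# its two stresses before convolution: the second stress is scaled by the
# parameter f (initial value -1.0, see set_init_parameters), so the effective
# stress is typically precipitation minus evaporation.
def _example_combined_stress(prec, evap, f=-1.0):
    # prec and evap are pandas.Series defined on overlapping indices
    return prec.add(f * evap)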
class StepModel(StressModelBase):
"""Stressmodel that simulates a step trend.
Parameters
----------
tstart: str or Timestamp
String with the start date of the step, e.g. '2018-01-01'. This
value is fixed by default. Use ml.set_parameter("step_tstart",
vary=True) to vary the start time of the step trend.
name: str
String with the name of the stressmodel.
rfunc: pastas.rfunc.RfuncBase class, optional
Pastas response function used to simulate the effect of the step.
Default is rfunc.One, an instant effect.
up: bool, optional
        Force a direction of the step. Default is True.
cutoff: float, optional
float between 0 and 1 to determine how long the response is (default
is 99.9% of the actual response time). Used to reduce computation
times.
Notes
-----
    The step trend is calculated as follows. First, a binary series is
    created, with zero values before tstart, and ones after the start. This
    series is convolved with the block response to simulate a step trend. A
    minimal sketch of this construction is given below the class definition.
"""
_name = "StepModel"
def __init__(self, tstart, name, rfunc=One, up=True, cutoff=0.999):
rfunc = rfunc(up=up, cutoff=cutoff, meanstress=1.0)
StressModelBase.__init__(self, name=name, tmin=Timestamp.min,
tmax=Timestamp.max, rfunc=rfunc)
self.tstart = Timestamp(tstart)
self.set_init_parameters()
def set_init_parameters(self):
self.parameters = self.rfunc.get_init_parameters(self.name)
tmin = Timestamp.min.toordinal()
tmax = Timestamp.max.toordinal()
tinit = self.tstart.toordinal()
self.parameters.loc[self.name + "_tstart"] = (tinit, tmin, tmax,
False, self.name)
def simulate(self, p, tmin=None, tmax=None, freq=None, dt=1):
tstart = Timestamp.fromordinal(int(p[-1]), freq="D")
tindex = date_range(tmin, tmax, freq=freq)
h = Series(0, tindex, name=self.name)
h.loc[h.index > tstart] = 1
b = self._get_block(p[:-1], dt, tmin, tmax)
npoints = h.index.size
h = Series(data=fftconvolve(h, b, 'full')[:npoints],
index=h.index, name=self.name, fastpath=True)
return h
def to_dict(self, series=True):
data = {
"stressmodel": self._name,
'tstart': self.tstart,
'name': self.name,
"up": self.rfunc.up,
'rfunc': self.rfunc._name
}
return data
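# Sketch (not Pastas code) of the step-trend construction described in the
# StepModel notes above: build a 0/1 series that switches at tstart and
# convolve it with a block response. The switch date and the single-step block
# response (the equivalent of rfunc.One) are illustrative assumptions.
def _example_step_trend():
    import numpy as np
    from pandas import Series, Timestamp, date_range
    from scipy.signal import fftconvolve
    tindex = date_range("2018-01-01", "2019-01-01", freq="D")
    step = Series(0.0, index=tindex)
    step.loc[step.index > Timestamp("2018-06-01")] = 1.0
    block = np.array([1.0])  # instantaneous response, a single step
    values = fftconvolve(step.values, block, "full")[:step.size]
    return Series(values, index=tindex)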
class LinearTrend(StressModelBase):
"""Stressmodel that simulates a linear trend.
Parameters
----------
start: str
String with a date to start the trend (e.g., "2018-01-01"), will be
transformed to an ordinal number internally.
end: str
String with a date to end the trend (e.g., "2018-01-01"), will be
transformed to an ordinal number internally.
name: str, optional
String with the name of the stress model.
Notes
-----
    While possible, it is not recommended to vary the parameters for the
    start and end time of the linear trend. These parameters are usually
    hard or even impossible to estimate from the data.
"""
_name = "LinearTrend"
def __init__(self, start, end, name="trend"):
StressModelBase.__init__(self, name=name, tmin=Timestamp.min,
tmax=Timestamp.max)
self.start = start
self.end = end
self.set_init_parameters()
def set_init_parameters(self):
"""Set the initial parameters for the stress model."""
start = Timestamp(self.start).toordinal()
end = Timestamp(self.end).toordinal()
tmin = Timestamp.min.toordinal()
tmax = Timestamp.max.toordinal()
self.parameters.loc[self.name + "_a"] = (0.0, -np.inf, np.inf,
True, self.name)
self.parameters.loc[self.name + "_tstart"] = (start, tmin, tmax,
False, self.name)
self.parameters.loc[self.name + "_tend"] = (end, tmin, tmax,
False, self.name)
def simulate(self, p, tmin=None, tmax=None, freq=None, dt=1):
"""Simulate the trend."""
tindex = date_range(tmin, tmax, freq=freq)
if p[1] < tindex[0].toordinal():
tmin = tindex[0]
else:
tmin = Timestamp.fromordinal(int(p[1]))
if p[2] >= tindex[-1].toordinal():
tmax = tindex[-1]
else:
tmax = Timestamp.fromordinal(int(p[2]))
trend = tindex.to_series().diff() / Timedelta(1, "D")
trend.loc[:tmin] = 0
trend.loc[tmax:] = 0
trend = trend.cumsum() * p[0]
return trend.rename(self.name)
def to_dict(self, series=None):
data = {
"stressmodel": self._name,
'start': self.start,
"end": self.end,
'name': self.name,
}
return data
class Constant(StressModelBase):
"""A constant value that is added to the time series model.
Parameters
----------
name: str, optional
Name of the stressmodel
initial: float, optional
Initial estimate of the parameter value. E.g. The minimum of the
observed series.
"""
_name = "Constant"
def __init__(self, name="constant", initial=0.0):
StressModelBase.__init__(self, name=name, tmin=Timestamp.min,
tmax=Timestamp.max)
self.initial = initial
self.set_init_parameters()
def set_init_parameters(self):
self.parameters.loc[self.name + '_d'] = (
self.initial, np.nan, np.nan, True, self.name)
@staticmethod
def simulate(p=None):
return p
class WellModel(StressModelBase):
"""Convolution of one or more stresses with one response function.
Parameters
----------
stress: list
list containing the stresses timeseries.
rfunc: pastas.rfunc
this model only works with the HantushWellModel response function.
name: str
Name of the stressmodel.
distances: list or list-like
list of distances to oseries, must be ordered the same as the
stresses.
up: bool, optional
whether a positive stress has an increasing or decreasing effect on
the model, by default False, in which case positive stress lowers
e.g., the groundwater level.
cutoff: float, optional
float between 0 and 1 to determine how long the response is (default
is 99.9% of the actual response time). Used to reduce computation
times.
settings: str, list of dict, optional
settings of the timeseries, by default "well".
sort_wells: bool, optional
sort wells from closest to furthest, by default True.
Notes
-----
    This class implements convolution of multiple series with the same
    response function. This can be applied when dealing with multiple
    wells in a time series model. The distance from each well to the
    location of the oseries has to be provided for each stress.
Warnings
--------
This model only works with the HantushWellModel response function.
"""
_name = "WellModel"
def __init__(self, stress, rfunc, name, distances, up=False, cutoff=0.999,
settings="well", sort_wells=True):
if not issubclass(rfunc, HantushWellModel):
raise NotImplementedError("WellModel only supports the rfunc "
"HantushWellModel!")
logger.warning("It is recommended to use LmfitSolve as the solver "
"when implementing WellModel. See "
"https://github.com/pastas/pastas/issues/177.")
# sort wells by distance
self.sort_wells = sort_wells
if self.sort_wells:
stress = [s for _, s in sorted(zip(distances, stress),
key=lambda pair: pair[0])]
if isinstance(settings, list):
settings = [s for _, s in sorted(zip(distances, settings),
key=lambda pair: pair[0])]
distances = np.sort(distances)
if settings is None or isinstance(settings, str):
settings = len(stress) * [settings]
# convert stresses to TimeSeries if necessary
stress = self.handle_stress(stress, settings)
# Check if number of stresses and distances match
if len(stress) != len(distances):
msg = "The number of stresses does not match the number" \
"of distances provided."
logger.error(msg)
raise ValueError(msg)
else:
self.distances = Series(index=[s.name for s in stress],
data=distances,
name="distances")
meanstress = np.max([s.series.std() for s in stress])
rfunc = rfunc(up=up, cutoff=cutoff, meanstress=meanstress,
distances=self.distances.values)
tmin = np.min([s.series.index.min() for s in stress])
tmax = np.max([s.series.index.max() for s in stress])
StressModelBase.__init__(self, name=name, tmin=tmin,
tmax=tmax, rfunc=rfunc)
self.stress = stress
self.freq = self.stress[0].settings["freq"]
self.set_init_parameters()
def set_init_parameters(self):
self.parameters = self.rfunc.get_init_parameters(self.name)
def simulate(self, p=None, tmin=None, tmax=None, freq=None, dt=1,
istress=None, **kwargs):
distances = self.get_distances(istress=istress)
stress_df = self.get_stress(p=p, tmin=tmin, tmax=tmax, freq=freq,
istress=istress)
h = Series(data=0, index=self.stress[0].series.index, name=self.name)
for name, r in distances.iteritems():
stress = stress_df.loc[:, name]
npoints = stress.index.size
p_with_r = np.concatenate([p, np.array([r])])
b = self._get_block(p_with_r, dt, tmin, tmax)
c = fftconvolve(stress, b, 'full')[:npoints]
h = h.add(Series(c, index=stress.index, fastpath=True),
fill_value=0.0)
if istress is not None:
if isinstance(istress, list):
h.name = self.name + "_" + "+".join(str(i) for i in istress)
elif self.stress[istress].name is not None:
h.name = self.stress[istress].name
else:
h.name = self.name + "_" + str(istress)
else:
h.name = self.name
return h
@staticmethod
def handle_stress(stress, settings):
"""Internal method to handle user provided stress in init.
Parameters
----------
stress: pandas.Series, pastas.TimeSeries, list or dict
stress or collection of stresses
settings: dict or iterable
settings dictionary
Returns
-------
stress: list
return a list with the stresses transformed to pastas TimeSeries.
"""
data = []
if isinstance(stress, Series):
data.append(TimeSeries(stress, settings=settings))
elif isinstance(stress, dict):
for i, (name, value) in enumerate(stress.items()):
data.append(TimeSeries(value, name=name, settings=settings[i]))
elif isinstance(stress, list):
for i, value in enumerate(stress):
data.append(TimeSeries(value, settings=settings[i]))
else:
logger.error("Stress format is unknown. Provide a Series, "
"dict or list.")
return data
def get_stress(self, p=None, tmin=None, tmax=None, freq=None,
istress=None, **kwargs):
if tmin is None:
tmin = self.tmin
if tmax is None:
tmax = self.tmax
self.update_stress(tmin=tmin, tmax=tmax, freq=freq)
if istress is None:
return DataFrame.from_dict({s.name: s.series for s in self.stress})
elif isinstance(istress, list):
return DataFrame.from_dict(
{s.name: s.series for s in self.stress}
).iloc[:, istress]
else:
return self.stress[istress].series.to_frame()
def get_distances(self, istress=None):
if istress is None:
return self.distances
elif isinstance(istress, list):
return self.distances.iloc[istress]
else:
return self.distances.iloc[istress:istress + 1]
def get_parameters(self, model=None, istress=None):
""" Get parameters including distance to observation point and
return as array (dimensions = (nstresses, 4)).
Parameters
----------
model : pastas.Model, optional
if provided, return optimal model parameters, else return
initial parameters
istress : int, optional
if provided, return specific parameter set, else
return all parameters
Returns
-------
p : np.array
parameters for each stress as row of array, if istress is used
returns only one row.
"""
if model is None:
p = self.parameters.initial.values
else:
p = model.get_parameters(self.name)
distances = self.get_distances(istress=istress).values
if distances.size > 1:
p_with_r = np.concatenate([np.tile(p, (distances.size, 1)),
distances[:, np.newaxis]], axis=1)
else:
p_with_r = np.r_[p, distances]
return p_with_r
def to_dict(self, series=True):
"""Method to export the WellModel object.
Returns
-------
data: dict
dictionary with all necessary information to reconstruct the
WellModel object.
"""
data = {
"stressmodel": self._name,
"rfunc": self.rfunc._name,
"name": self.name,
"up": True if self.rfunc.up else False,
"distances": self.distances.to_list(),
"cutoff": self.rfunc.cutoff,
"stress": self.dump_stress(series),
"sort_wells": self.sort_wells
}
return data
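# Sketch (made-up numbers) of what WellModel.get_parameters above returns: one
# row of shared response-function parameters per well, with that well's
# distance to the observation point appended as the last column.
def _example_wellmodel_parameter_rows():
    import numpy as np
    p = np.array([1.0, 100.0, 0.5])  # shared (hypothetical) response parameters
    distances = np.array([50.0, 120.0, 300.0])  # one distance per well
    p_with_r = np.concatenate([np.tile(p, (distances.size, 1)),
                               distances[:, np.newaxis]], axis=1)
    return p_with_r  # shape (n_wells, n_param + 1)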
class RechargeModel(StressModelBase):
"""Stressmodel simulating the effect of groundwater recharge on the
groundwater head.
Parameters
----------
prec: pandas.Series or pastas.timeseries.TimeSeries
pandas.Series or pastas.timeseries object containing the
precipitation series.
evap: pandas.Series or pastas.timeseries.TimeSeries
pandas.Series or pastas.timeseries object containing the
evaporation series.
rfunc: pastas.rfunc class, optional
Response function used in the convolution with the stress. Default
is Exponential.
name: str, optional
Name of the stress. Default is "recharge".
recharge: pastas.recharge instance, optional
        Instance of the recharge model. Options are: Linear (default),
        FlexModel and Berendrecht. These can be accessed through ps.rch.
temp: pandas.Series or pastas.TimeSeries, optional
pandas.Series or pastas.TimeSeries object containing the
        temperature series. Whether this argument is required depends on
        the recharge model that is used.
cutoff: float, optional
        float between 0 and 1 to determine how long the response is (default
is 99.9% of the actual response time). Used to reduce computation
times.
settings: list of dicts or str, optional
The settings of the precipitation and evaporation time series,
in this order. This can be a string referring to a predefined
settings dict, or a dict with the settings to apply. Refer to the
docstring of pastas.Timeseries for further information. Default is (
"prec", "evap").
metadata: tuple of dicts or list of dicts, optional
dictionary containing metadata about the stress. This is passed onto
the TimeSeries object.
See Also
--------
pastas.rfunc
pastas.timeseries
pastas.rch
Notes
-----
    This stress model computes the contribution of precipitation and
    potential evaporation in two steps. In the first step a recharge flux is
    computed by a model determined by the input argument `recharge`. In the
    second step this recharge flux is convolved with a response function to
    obtain the contribution of recharge to the groundwater levels. A
    schematic sketch of these two steps is given below the class definition.
Examples
--------
>>> sm = ps.RechargeModel(rain, evap, rfunc=ps.Exponential,
>>> recharge=ps.rch.FlexModel(), name="rch")
>>> ml.add_stressmodel(sm)
Warning
-------
    We recommend not to store a RechargeModel in a variable named `rm`. This
    name is already used as an alias in IPython to remove files and will
    cause problems later.
"""
_name = "RechargeModel"
def __init__(self, prec, evap, rfunc=Exponential, name="recharge",
recharge=Linear(), temp=None, cutoff=0.999,
settings=("prec", "evap"), metadata=(None, None)):
# Store the precipitation and evaporation time series
self.prec = TimeSeries(prec, settings=settings[0],
metadata=metadata[0])
self.evap = TimeSeries(evap, settings=settings[1],
metadata=metadata[1])
# Check if both series have a regular time step
if self.prec.freq_original is None:
msg = "Frequency of the precipitation series could not be " \
"determined. Please provide a time series with a regular " \
"time step."
raise IndexError(msg)
if self.evap.freq_original is None:
msg = "Frequency of the evaporation series could not be " \
"determined. Please provide a time series with a regular " \
"time step."
raise IndexError(msg)
# Store recharge object
self.recharge = recharge
# Store a temperature time series if needed or set to None
if self.recharge.temp is True:
if temp is None:
msg = "Recharge module requires a temperature series. " \
"No temperature series were provided"
raise TypeError(msg)
elif len(settings) < 3 or len(metadata) < 3:
msg = "Number of values for the settings and/or metadata is " \
"incorrect."
raise TypeError(msg)
else:
self.temp = TimeSeries(temp, settings=settings[2],
metadata=metadata[2])
else:
self.temp = None
# Select indices from validated stress where both series are available.
index = self.prec.series.index.intersection(self.evap.series.index)
if index.empty:
msg = ("The stresses that were provided have no overlapping"
"time indices. Please make sure the indices of the time"
"series overlap.")
logger.error(msg)
raise Exception(msg)
# Calculate initial recharge estimation for initial rfunc parameters
p = self.recharge.get_init_parameters().initial.values
meanstress = self.get_stress(p=p, tmin=index.min(), tmax=index.max(),
freq=self.prec.settings["freq"]).std()
rfunc = rfunc(up=True, cutoff=cutoff, meanstress=meanstress)
StressModelBase.__init__(self, name=name, tmin=index.min(),
tmax=index.max(), rfunc=rfunc)
self.stress = [self.prec, self.evap]
if self.temp:
self.stress.append(self.temp)
self.freq = self.prec.settings["freq"]
self.set_init_parameters()
if isinstance(self.recharge, Linear):
self.nsplit = 2
else:
self.nsplit = 1
def set_init_parameters(self):
"""Internal method to set the initial parameters."""
self.parameters = concat(
[self.rfunc.get_init_parameters(self.name),
self.recharge.get_init_parameters(self.name)
])
def update_stress(self, **kwargs):
"""Method to update the settings of the individual TimeSeries.
Notes
-----
For the individual options for the different settings please refer to
the docstring from the TimeSeries.update_series() method.
See Also
--------
ps.timeseries.TimeSeries.update_series
"""
self.prec.update_series(**kwargs)
self.evap.update_series(**kwargs)
if self.temp is not None:
self.temp.update_series(**kwargs)
if "freq" in kwargs:
self.freq = kwargs["freq"]
def simulate(self, p=None, tmin=None, tmax=None, freq=None, dt=1.0,
istress=None):
"""Method to simulate the contribution of recharge to the head.
Parameters
----------
p: array_like, optional
array_like object with the values as floats representing the
model parameters.
tmin: string, optional
tmax: string, optional
freq: string, optional
dt: float, optional
Time step to use in the recharge calculation.
istress: int, optional
This only works for the Linear model!
Returns
-------
pandas.Series
"""
if p is None:
p = self.parameters.initial.values
b = self._get_block(p[:-self.recharge.nparam], dt, tmin, tmax)
stress = self.get_stress(p=p, tmin=tmin, tmax=tmax, freq=freq,
istress=istress).values
name = self.name
if istress is not None:
if istress == 1 and self.nsplit > 1:
# only happen when Linear is used as the recharge model
stress = stress * p[-1]
if self.stress[istress].name is not None:
name = f"{self.name} ({self.stress[istress].name})"
return Series(data=fftconvolve(stress, b, 'full')[:stress.size],
index=self.prec.series.index, name=name, fastpath=True)
def get_stress(self, p=None, tmin=None, tmax=None, freq=None,
istress=None, **kwargs):
"""Method to obtain the recharge stress calculated by the recharge
model.
Parameters
----------
p: array_like, optional
array_like object with the values as floats representing the
model parameters.
tmin: string, optional
tmax: string, optional
freq: string, optional
istress: int, optional
Return one of the stresses used for the recharge calculation.
0 for precipitation, 1 for evaporation and 2 for temperature.
kwargs
Returns
-------
stress: pandas.Series
            When no istress is selected, this returns the estimated recharge
            flux that is convolved with a response function in the
            "simulate" method.
"""
if tmin is None:
tmin = self.tmin
if tmax is None:
tmax = self.tmax
self.update_stress(tmin=tmin, tmax=tmax, freq=freq)
if istress is None:
prec = self.prec.series.values
evap = self.evap.series.values
if self.temp is not None:
temp = self.temp.series.values
else:
temp = None
if p is None:
p = self.parameters.initial.values
stress = self.recharge.simulate(prec=prec, evap=evap, temp=temp,
p=p[-self.recharge.nparam:])
return Series(data=stress, index=self.prec.series.index,
name="recharge", fastpath=True)
elif istress == 0:
return self.prec.series
elif istress == 1:
return self.evap.series
else:
return self.temp.series
def get_water_balance(self, p=None, tmin=None, tmax=None, freq=None):
"""Experimental method to obtain the water balance components.
Parameters
----------
p: array_like, optional
array_like object with the values as floats representing the
model parameters.
tmin: string, optional
tmax: string, optional
freq: string, optional
Returns
-------
wb: pandas.DataFrame
Dataframe with the water balance components, both fluxes and
states.
Notes
-----
        This method returns a data frame with all water balance components,
        fluxes and states. All ingoing fluxes have a positive sign (e.g.,
        precipitation) and all outgoing fluxes have a negative sign (e.g.,
        recharge).
Warning
-------
This is an experimental method and may change in the future.
Examples
--------
>>> sm = ps.RechargeModel(prec, evap, ps.Gamma, ps.rch.FlexModel(),
>>> name="rch")
>>> ml.add_stressmodel(sm)
>>> ml.solve()
>>> wb = sm.get_water_balance(ml.get_parameters("rch"))
>>> wb.plot(subplots=True)
"""
if p is None:
p = self.parameters.initial.values
prec = self.get_stress(tmin=tmin, tmax=tmax, freq=freq,
istress=0).values
evap = self.get_stress(tmin=tmin, tmax=tmax, freq=freq,
istress=1).values
df = self.recharge.get_water_balance(prec=prec, evap=evap, temp=None,
p=p[-self.recharge.nparam:])
df.index = self.prec.series.index
return df
def to_dict(self, series=True):
data = {
"stressmodel": self._name,
"prec": self.prec.to_dict(series=series),
"evap": self.evap.to_dict(series=series),
"rfunc": self.rfunc._name,
"name": self.name,
"recharge": self.recharge._name,
"cutoff": self.rfunc.cutoff,
"temp": self.temp.to_dict() if self.temp else None
}
return data
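# Sketch (not Pastas code) of the two steps described in the RechargeModel
# notes above: first a recharge flux is computed from precipitation and
# evaporation (here a simple linear model R = P + f * E with f typically
# negative, cf. the scaling by p[-1] in simulate), then that flux is convolved
# with a block response. Inputs are assumed to be numpy arrays on a daily grid.
def _example_recharge_contribution(prec, evap, block, f=-1.0):
    from scipy.signal import fftconvolve
    recharge = prec + f * evap
    return fftconvolve(recharge, block, "full")[:recharge.size]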
class TarsoModel(RechargeModel):
"""Stressmodel simulating the effect of recharge using the Tarso method.
Parameters
----------
oseries: pandas.Series or pastas.TimeSeries, optional
A series of observations on which the model will be calibrated. It is
used to determine the initial values of the drainage levels and the
boundaries of the upper drainage level. Specify either oseries or dmin
and dmax.
dmin: float, optional
The minimum drainage level. It is used to determine the initial values
of the drainage levels and the lower boundary of the upper drainage
level. Specify either oseries or dmin and dmax.
dmax : float, optional
The maximum drainage level. It is used to determine the initial values
of the drainage levels and the upper boundary of the upper drainage
level. Specify either oseries or dmin and dmax.
rfunc: pastas.rfunc
this model only works with the Exponential response function.
See Also
--------
pastas.recharge
Notes
-----
The Threshold autoregressive self-exciting open-loop (Tarso) model
[knotters_1999]_ is nonlinear in structure because it incorporates two
regimes which are separated by a threshold. This model method can be
used to simulate a groundwater system where the groundwater head reaches
the surface or drainage level in wet conditions. TarsoModel uses two
drainage levels, with two exponential response functions. When the
simulation reaches the second drainage level, the second response
function becomes active. Because of its structure, TarsoModel cannot be
combined with other stress models, a constant or a transform.
TarsoModel inherits from RechargeModel. Only parameters specific to the
child class are named above.
References
----------
.. [knotters_1999] Knotters, M. & De Gooijer, Jan G.. (1999). TARSO
modeling of water table depths. Water Resources Research. 35.
10.1029/1998WR900049.
"""
_name = "TarsoModel"
def __init__(self, prec, evap, oseries=None, dmin=None, dmax=None,
rfunc=Exponential, **kwargs):
check_numba()
if oseries is not None:
if dmin is not None or dmax is not None:
msg = 'Please specify either oseries or dmin and dmax'
raise (Exception(msg))
o = TimeSeries(oseries).series
dmin = o.min()
dmax = o.max()
elif dmin is None or dmax is None:
msg = 'Please specify either oseries or dmin and dmax'
raise (Exception(msg))
if not issubclass(rfunc, Exponential):
raise NotImplementedError("TarsoModel only supports rfunc "
"Exponential!")
self.dmin = dmin
self.dmax = dmax
super().__init__(prec=prec, evap=evap, rfunc=rfunc, **kwargs)
def set_init_parameters(self):
# parameters for the first drainage level
p0 = self.rfunc.get_init_parameters(self.name)
one = One(meanstress=self.dmin + 0.5 * (self.dmax - self.dmin))
pd0 = one.get_init_parameters(self.name).squeeze()
p0.loc[f'{self.name}_d'] = pd0
p0.index = [f'{x}0' for x in p0.index]
# parameters for the second drainage level
p1 = self.rfunc.get_init_parameters(self.name)
initial = self.dmin + 0.75 * (self.dmax - self.dmin)
pd1 = Series({'initial': initial, 'pmin': self.dmin, 'pmax': self.dmax,
'vary': True, 'name': self.name})
p1.loc[f'{self.name}_d'] = pd1
p1.index = [f'{x}1' for x in p1.index]
# parameters for the recharge-method
pr = self.recharge.get_init_parameters(self.name)
# combine all parameters
self.parameters = concat([p0, p1, pr])
def simulate(self, p=None, tmin=None, tmax=None, freq=None, dt=1):
stress = self.get_stress(p=p, tmin=tmin, tmax=tmax, freq=freq)
h = self.tarso(p[:-self.recharge.nparam], stress.values, dt)
sim = Series(h, name=self.name, index=stress.index)
return sim
def to_dict(self, series=True):
data = super().to_dict(series)
data['dmin'] = self.dmin
data['dmax'] = self.dmax
return data
@staticmethod
def _check_stressmodel_compatibility(ml):
"""Internal method to check if no other stressmodels, a constants or a
transform is used."""
msg = "A TarsoModel cannot be combined with %s. Either remove the" \
" TarsoModel or the %s."
if len(ml.stressmodels) > 1:
logger.warning(msg, "other stressmodels", "stressmodels")
if ml.constant is not None:
logger.warning(msg, "a constant", "constant")
if ml.transform is not None:
logger.warning(msg, "a transform", "transform")
@staticmethod
@njit
def tarso(p, r, dt):
"""Calculates the head based on exponential decay of the previous
timestep and recharge, using two thresholds."""
A0, a0, d0, A1, a1, d1 = p
# calculate physical meaning of these parameters
S0 = a0 / A0
c0 = A0
S1 = a1 / A1
c1 = A1
# calculate effective parameters for the top level
c_e = 1 / ((1 / c0) + (1 / c1))
d_e = (c1 / (c0 + c1)) * d0 + (c0 / (c0 + c1)) * d1
a_e = S1 * c_e
h = np.full(len(r), np.NaN)
for i in range(len(r)):
if i == 0:
h0 = (d0 + d1) / 2
high = h0 > d1
if high:
S, a, c, d = S1, a_e, c_e, d_e
else:
S, a, c, d = S0, a0, c0, d0
else:
h0 = h[i - 1]
exp_a = np.exp(-dt / a)
h[i] = (h0 - d) * exp_a + r[i] * c * (1 - exp_a) + d
newhigh = h[i] > d1
if high != newhigh:
# calculate time until d1 is reached
dtdr = - S * c * np.log(
(d1 - d - r[i] * c) / (h0 - d - r[i] * c))
if dtdr > dt:
raise (Exception())
# change parameters
high = newhigh
if high:
S, a, c, d = S1, a_e, c_e, d_e
else:
S, a, c, d = S0, a0, c0, d0
# calculate new level after reaching d1
exp_a = np.exp(-(dt - dtdr) / a)
h[i] = (d1 - d) * exp_a + r[i] * c * (1 - exp_a) + d
return h
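# Hedged usage sketch (in the doctest style of the module docstring, not
# executed here): constructing a TarsoModel from precipitation, evaporation
# and an observed head series. The variables prec, evap and head are
# placeholders for user-supplied pandas.Series, and it is assumed that
# TarsoModel is exposed at the package level like the other stress models.
#
# >>> import pastas as ps
# >>> sm = ps.TarsoModel(prec, evap, oseries=head)
# >>> ml = ps.Model(head)
# >>> ml.add_stressmodel(sm)
# >>> ml.solve()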
|
mit
|
Stargrazer82301/CAAPR
|
CAAPR/CAAPR_AstroMagic/PTS/pts/eagle/plotgreybodyfit.py
|
1
|
4909
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.eagle.plotgreybodyfit Plot a modified blackbody fit to
# the dust continuum emission for an EAGLE SKIRT-run.
#
# The facilities in this module serve to plot a modified blackbody fit to
# the dust continuum emission for a particular EAGLE SKIRT-run.
# ----------------------------------------------------------------------
# use a non-interactive back-end to generate high-quality vector graphics
import matplotlib.pyplot as plt
# import standard modules
import os.path
import numpy as np
# import pts modules
from ..core.tools import archive as arch
from ..core.basics.filter import Filter
from ..core.basics.greybody import GreyBody, kappa350_Cortese
# ----------------------------------------------------------------------
## This function creates a PDF plot of a modified blackbody fit to
# the dust continuum emission for a particular EAGLE SKIRT-run,
# also listing the corresponding temperature.
# The output plot is placed in the SKIRT-run's visualization directory.
def plotgreybodyfit(skirtrun):
simulation = skirtrun.simulation()
# setup the figure
figure = plt.figure(figsize=(10,6))
plt.xscale('log')
plt.yscale('log')
# load and plot the total SED
filepath = simulation.seddatpaths()[0]
lambdav, fluxv = np.loadtxt(arch.opentext(filepath), usecols=(0,1), unpack=True)
lambdav = simulation.convert(lambdav, to_unit='micron', quantity='wavelength')
fluxv = simulation.convert(fluxv, to_unit='Jy', quantity='fluxdensity', wavelength=lambdav)
plot = (lambdav>=10) & (lambdav<=1000)
plt.plot(lambdav[plot], fluxv[plot], color='b', label="SKIRT galaxy SED")
# load and plot the contributions from HII particles (stellar emission) and gas particles (dust emission)
# --> we do this inside a try block because these columns are not always available
try:
fstrdirv, fstrscav, ftotdusv = np.loadtxt(arch.opentext(filepath), usecols=(2,3,4), unpack=True)
fstrdirv = simulation.convert(fstrdirv, to_unit='Jy', quantity='fluxdensity', wavelength=lambdav)
fstrscav = simulation.convert(fstrscav, to_unit='Jy', quantity='fluxdensity', wavelength=lambdav)
ftotdusv = simulation.convert(ftotdusv, to_unit='Jy', quantity='fluxdensity', wavelength=lambdav)
plt.plot(lambdav[plot], fstrdirv[plot]+fstrscav[plot], color='c', ls="dashed", label=" contribution from HII regions")
plt.plot(lambdav[plot], ftotdusv[plot], color='y', ls="dashed", label=" contribution from other dust")
except:
pass
# load and plot the Herschel continuum data points (160, 250, 350, 500 micron)
info = { }
infofile = arch.listdir(skirtrun.vispath(), "_info.txt")[0]
for line in arch.opentext(os.path.join(skirtrun.vispath(),infofile)):
if not line.startswith("#"):
key,dummy,value = line.split(None, 2)
info[key] = float(value)
waves = np.array( [ Filter(fs).pivotwavelength() for fs in ("Pacs.red","SPIRE.PSW","SPIRE.PMW","SPIRE.PLW")] )
fluxes = np.array(( info['instr_xy_fluxdensity_pacs_red_continuum'],
info['instr_xy_fluxdensity_spire_psw_continuum'],
info['instr_xy_fluxdensity_spire_pmw_continuum'],
info['instr_xy_fluxdensity_spire_plw_continuum'] ))
sigmas = np.array(( 3,1,1,3 )) # pacs is less sensitive; longer wavelength fluxes are harder to measure
plt.scatter(waves, fluxes, color='r', marker='*', label="Mock PACS/SPIRE fluxes")
# fit a grey body to the Herschel fluxes and plot the result
greybody = GreyBody(simulation.instrumentdistance(), 2, kappa350_Cortese)
T,M = greybody.fit(waves, fluxes, sigmas)
plt.plot(lambdav[plot], greybody(lambdav[plot], T, M), color='m',
label=r"Grey body fit $T={:.2f},\,M_\mathrm{{dust}}={:.2e}\,M_\odot$".format(T,M))
# add axis labels, legend and title
plt.grid('on')
plt.xlabel(r"$\lambda\,(\mu \mathrm{m})$", fontsize='medium')
plt.ylabel(simulation.fluxlabel(), fontsize='medium')
plt.xlim(10, 1000)
ymax = fluxv[plot].max()
plt.ylim(ymax*1.1e-3, ymax*1.1)
plt.legend(loc='upper left', prop={'size':'small'})
plt.title("runid {} -- {}".format(skirtrun.runid(), skirtrun.prefix()), fontsize='medium')
# save the figure
plotpath = os.path.join(skirtrun.vispath(), skirtrun.prefix()+"_dust_body_fit.pdf")
plt.savefig(plotpath, bbox_inches='tight', pad_inches=0.25)
plt.close()
print "Created PDF plot file " + plotpath
# ----------------------------------------------------------------------
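# ----------------------------------------------------------------------
# Independent sketch (not the PTS GreyBody class used above) of what the grey
# body fit conceptually does: fit a modified blackbody
# F_nu = M * kappa_nu * B_nu(T) / D^2 with emissivity index beta = 2 to a few
# far-infrared flux points. The opacity kappa350 = 0.192 m2/kg, the distance
# units (meters) and the starting values are illustrative assumptions.
def example_greybody_fit(wavelengths_micron, fluxes_jy, distance_m, kappa350=0.192):
    import numpy as np
    from scipy.optimize import curve_fit
    h, c, k = 6.626e-34, 2.998e8, 1.381e-23            # SI constants
    nu = c / (np.asarray(wavelengths_micron) * 1e-6)   # observed frequencies in Hz
    nu350 = c / 350e-6
    def model(nu, T, logM):
        planck = 2 * h * nu**3 / c**2 / np.expm1(h * nu / (k * T))
        kappa = kappa350 * (nu / nu350)**2             # beta = 2, kappa in m2/kg
        return 10**logM * kappa * planck / distance_m**2 * 1e26  # W/m2/Hz -> Jy
    (T, logM), _ = curve_fit(model, nu, fluxes_jy, p0=(20.0, 36.0))
    return T, 10**logM                                 # temperature [K], dust mass [kg]
# ----------------------------------------------------------------------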
|
mit
|
agiovann/Constrained_NMF
|
use_cases/granule_cells/prepare_nice_image.py
|
2
|
39221
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 24 17:06:17 2016
@author: agiovann
"""
from __future__ import division
from __future__ import print_function
#%%
from builtins import str
from builtins import range
from past.utils import old_div
try:
get_ipython().magic('load_ext autoreload')
get_ipython().magic('autoreload 2')
print((1))
except:
print('Not launched under iPython')
import matplotlib as mpl
mpl.use('TKAgg')
from matplotlib import pyplot as plt
# plt.ion()
import sys
import numpy as np
# sys.path.append('../SPGL1_python_port')
#%
from time import time
from scipy.sparse import coo_matrix
import tifffile
import subprocess
import time as tm
from time import time
import pylab as pl
import psutil
import glob
import os
import scipy
from ipyparallel import Client
import caiman as cm
from caiman.components_evaluation import evaluate_components
from caiman.utils.visualization import plot_contours, view_patches_bar
from caiman.base.rois import extract_binary_masks_blob
from caiman.source_extraction import cnmf as cnmf
#%%
# backend='SLURM'
backend = 'local'
if backend == 'SLURM':
n_processes = np.int(os.environ.get('SLURM_NPROCS'))
else:
    # use the number of available cores on your machine (at least one)
n_processes = np.maximum(np.int(psutil.cpu_count()), 1)
print(('using ' + str(n_processes) + ' processes'))
#%% start cluster for efficient computation
single_thread = False
if single_thread:
dview = None
else:
try:
c.close()
except:
print('C was not existing, creating one')
print("Stopping cluster to avoid unnencessary use of memory....")
sys.stdout.flush()
if backend == 'SLURM':
try:
cm.stop_server(is_slurm=True)
except:
print('Nothing to stop')
slurm_script = '/mnt/xfs1/home/agiovann/SOFTWARE/Constrained_NMF/SLURM/slurmStart.sh'
cm.start_server(slurm_script=slurm_script)
pdir, profile = os.environ['IPPPDIR'], os.environ['IPPPROFILE']
c = Client(ipython_dir=pdir, profile=profile)
else:
cm.stop_server()
cm.start_server()
c = Client()
print(('Using ' + str(len(c)) + ' processes'))
dview = c[:len(c)]
#%%
os.chdir('/mnt/ceph/users/agiovann/ImagingData/eyeblink/b38/20160706154257')
fls = []
for root, dirs, files in os.walk("."):
for file in files:
if file.endswith("1.npz"):
print((os.path.join(root, file)))
fls.append(os.path.join(root, file))
fls.sort()
for fl in fls:
print(fl)
with np.load(fl) as ld:
print((list(ld.keys())))
tmpls = ld['template']
lq, hq = np.percentile(tmpls, [5, 95])
pl.imshow(tmpls, cmap='gray', vmin=lq, vmax=hq)
pl.pause(.001)
pl.cla()
#%%
all_movs = []
for f in fls:
with np.load(f) as fl:
print(f)
# pl.subplot(1,2,1)
# pl.imshow(fl['template'],cmap=pl.cm.gray)
# pl.subplot(1,2,2)
all_movs.append(fl['template'][np.newaxis, :, :])
# pl.plot(fl['shifts'])
# pl.pause(.001)
all_movs = cm.movie(np.concatenate(all_movs, axis=0), fr=30)
all_movs, shifts, _, _ = all_movs.motion_correct(20, 20, template=None)
all_movs[30:80].play(backend='opencv', gain=5., fr=10)
all_movs = all_movs[30:80]
fls = fls[30:80]
final_template = np.median(all_movs, 0)
#%%
new_fls = []
for fl in fls:
new_fls.append(fl[:-3] + 'tif')
#%%
file_res = cb.motion_correct_parallel(new_fls, fr=6, template=final_template, margins_out=0,
max_shift_w=25, max_shift_h=25, dview=c[:], apply_smooth=True, save_hdf5=False, remove_blanks=False)
#%%
xy_shifts = []
for fl in new_fls:
if os.path.exists(fl[:-3] + 'npz'):
print((fl[:-3] + 'npz'))
with np.load(fl[:-3] + 'npz') as ld:
xy_shifts.append(ld['shifts'])
else:
raise Exception('*********************** ERROR, FILE NOT EXISTING!!!')
#%%
resize_facts = (1, 1, .2)
name_new = cm.save_memmap_each(
new_fls, dview=c[:], base_name=None, resize_fact=resize_facts, remove_init=0, xy_shifts=xy_shifts)
#%%
fname_new = cm.save_memmap_join(
name_new, base_name='TOTAL_', n_chunks=6, dview=c[:])
#%%
m = cm.load('TOTAL__d1_512_d2_512_d3_1_order_C_frames_2300_.mmap', fr=6)
#%%
tmp = np.median(m, 0)
#%%
Cn = m.local_correlations(eight_neighbours=True, swap_dim=False)
pl.imshow(Cn, cmap='gray')
#%%
lq, hq = np.percentile(tmp, [10, 98])
pl.imshow(tmp, cmap='gray', vmin=lq, vmax=hq)
#%%
pl.imshow(tmp[10:160, 120:450], cmap='gray', vmin=lq, vmax=hq)
#%%
m1 = m[:, 10:160, 120:450]
m1.save('MOV_EXAMPLE_20160706154257.tif')
#%%
name_new = cm.save_memmap_each(
['MOV_EXAMPLE_20160706154257.tif'], dview=c[:], base_name=None)
#%%
n_chunks = 6 # increase this number if you have memory issues at this point
fname_new = cm.save_memmap_join(
name_new, base_name='MOV_EXAMPLE_20160706154257__', n_chunks=6, dview=dview)
#%%
Yr, dims, T = cm.load_memmap(
'MOV_EXAMPLE_20160706154257___d1_150_d2_330_d3_1_order_C_frames_2300_.mmap')
d1, d2 = dims
images = np.reshape(Yr.T, [T] + list(dims), order='F')
Y = np.reshape(Yr, dims + (T,), order='F')
#%%
Cn = cm.local_correlations(Y[:, :, :3000], swap_dim=True)
pl.imshow(Cn, cmap='gray')
#%%
rf = 10 # half-size of the patches in pixels. rf=25, patches are 50x50
stride = 4  # amount of overlap between the patches in pixels
K = 4 # number of neurons expected per patch
gSig = [5, 5] # expected half size of neurons
merge_thresh = 0.8 # merging threshold, max correlation allowed
p = 2 # order of the autoregressive system
memory_fact = 1  # unitless number accounting for how much memory should be used. You will need to try different values to see which one works; the default is OK for a 16 GB system
save_results = False
#%% RUN ALGORITHM ON PATCHES
cnm = cnmf.CNMF(n_processes, k=K, gSig=gSig, merge_thresh=0.8, p=0, dview=dview, Ain=None,
rf=rf, stride=stride, memory_fact=memory_fact,
method_init='greedy_roi', alpha_snmf=10e2)
cnm = cnm.fit(images)
A_tot = cnm.A
C_tot = cnm.C
YrA_tot = cnm.YrA
b_tot = cnm.b
f_tot = cnm.f
sn_tot = cnm.sn
print(('Number of components:' + str(A_tot.shape[-1])))
#%%
final_frate = 2 # approx final rate (after eventual downsampling )
tB = np.minimum(-2, np.floor(-5. / 30 * final_frate))
tA = np.maximum(5, np.ceil(25. / 30 * final_frate))
Npeaks = 10
traces = C_tot + YrA_tot
# traces_a=traces-scipy.ndimage.percentile_filter(traces,8,size=[1,np.shape(traces)[-1]/5])
# traces_b=np.diff(traces,axis=1)
fitness_raw, fitness_delta, erfc_raw, erfc_delta, r_values, significant_samples = evaluate_components(
Y, traces, A_tot, C_tot, b_tot, f_tot, remove_baseline=True, N=5, robust_std=False, Athresh=0.1, Npeaks=Npeaks, tB=tB, tA=tA, thresh_C=0.3)
idx_components_r = np.where(r_values >= .4)[0]
idx_components_raw = np.where(fitness_raw < -20)[0]
idx_components_delta = np.where(fitness_delta < -10)[0]
idx_components = np.union1d(idx_components_r, idx_components_raw)
idx_components = np.union1d(idx_components, idx_components_delta)
idx_components_bad = np.setdiff1d(list(range(len(traces))), idx_components)
print(('Keeping ' + str(len(idx_components)) +
' and discarding ' + str(len(idx_components_bad))))
#%%
pl.figure()
crd = plot_contours(A_tot.tocsc()[:, idx_components], Cn, thr=0.9)
#%%
A_tot = A_tot.tocsc()[:, idx_components]
C_tot = C_tot[idx_components]
#%%
cnm = cnmf.CNMF(n_processes, k=A_tot.shape, gSig=gSig, merge_thresh=merge_thresh, p=p, dview=dview, Ain=A_tot, Cin=C_tot,
f_in=f_tot, rf=None, stride=None)
cnm = cnm.fit(images)
#%%
A, C, b, f, YrA, sn = cnm.A, cnm.C, cnm.b, cnm.f, cnm.YrA, cnm.sn
#%%
final_frate = 1
tB = np.minimum(-2, np.floor(-5. / 30 * final_frate))
tA = np.maximum(5, np.ceil(25. / 30 * final_frate))
Npeaks = 10
traces = C + YrA
# traces_a=traces-scipy.ndimage.percentile_filter(traces,8,size=[1,np.shape(traces)[-1]/5])
# traces_b=np.diff(traces,axis=1)
fitness_raw, fitness_delta, erfc_raw, erfc_delta, r_values, significant_samples = \
evaluate_components(Y, traces, A, C, b, f, remove_baseline=True,
N=5, robust_std=False, Athresh=0.1, Npeaks=Npeaks, tB=tB, tA=tA, thresh_C=0.3)
idx_components_r = np.where(r_values >= .5)[0]
idx_components_raw = np.where(fitness_raw < -50)[0]
idx_components_delta = np.where(fitness_delta < -30)[0]
min_radius = gSig[0] - 2
# masks_ws, idx_blobs, idx_non_blobs = extract_binary_masks_blob(
# A.tocsc(), min_radius, dims, num_std_threshold=1,
# minCircularity=0.5, minInertiaRatio=0.2, minConvexity=.7)
#% LOOK FOR BLOB LIKE STRUCTURES!
masks_ws, is_blob, is_non_blob = cm.base.rois.extract_binary_masks_blob_parallel(A.tocsc(), min_radius, dims, num_std_threshold=1,
minCircularity=0.1, minInertiaRatio=0.1, minConvexity=.1, dview=dview)
idx_blobs = np.where(is_blob)[0]
idx_non_blobs = np.where(is_non_blob)[0]
idx_components = np.union1d(idx_components_r, idx_components_raw)
idx_components = np.union1d(idx_components, idx_components_delta)
idx_blobs = np.intersect1d(idx_components, idx_blobs)
idx_components_bad = np.setdiff1d(list(range(len(traces))), idx_components)
print(' ***** ')
print((len(traces)))
print((len(idx_components)))
print((len(idx_blobs)))
#%%
save_results = False
if save_results:
np.savez('results_analysis.npz', Cn=Cn, A=A.todense(), C=C, b=b, f=f, YrA=YrA, sn=sn,
d1=d1, d2=d2, idx_components=idx_components, idx_components_bad=idx_components_bad)
    scipy.io.savemat('results_analysis.mat', {'Cn': Cn, 'A': A.toarray(), 'C': C, 'b': b, 'f': f, 'YrA': YrA,
'sn': sn, 'd1': d1, 'd2': d2, 'idx_components': idx_components, 'idx_components_blobs': idx_blobs})
np.savez('results_blobs.npz', spatial_comps=A.tocsc().toarray().reshape(dims + (-1,), order='F').transpose(
[2, 0, 1]), masks=masks_ws, idx_components=idx_components, idx_blobs=idx_blobs, idx_components_bad=idx_components_bad)
#%% visualize components
# pl.figure();
pl.subplot(1, 3, 1)
crd = plot_contours(A.tocsc()[:, idx_components], Cn, thr=0.9)
pl.subplot(1, 3, 2)
crd = plot_contours(A.tocsc()[:, idx_blobs], Cn, thr=0.9)
pl.subplot(1, 3, 3)
crd = plot_contours(A.tocsc()[:, idx_components_bad], Cn, thr=0.9)
#%%
#idx_very_nice=[2, 19, 23, 27,32,43,45,49,51,94,100]
# idx_very_nice=np.array(idx_very_nice)[np.array([3,4,8,10])]
# idx_very_nice=idx_blobs[idx_very_nice]
idx_very_nice = idx_blobs
view_patches_bar(Yr, scipy.sparse.coo_matrix(A.tocsc()[:, idx_very_nice]), C[
idx_very_nice, :], b, f, dims[0], dims[1], YrA=YrA[idx_very_nice, :], img=Cn)
#%%
new_m = cm.movie(np.reshape(A.tocsc()[
:, idx_blobs] * C[idx_blobs] + b.dot(f), dims + (-1,), order='F').transpose([2, 0, 1]))
new_m.play(fr=30, backend='opencv', gain=7., magnification=3.)
#%%
new_m = cm.movie(np.reshape(A.tocsc()[:, idx_blobs] * C[idx_blobs] +
b * np.median(f), dims + (-1,), order='F').transpose([2, 0, 1]))
new_m.play(fr=30, backend='opencv', gain=7., magnification=3.)
#%%
new_m = cm.movie(np.reshape(A.tocsc()[
:, idx_blobs] * C[idx_blobs], dims + (-1,), order='F').transpose([2, 0, 1]))
new_m.play(fr=30, backend='opencv', gain=30., magnification=3.)
#%%
# idx_to_show=[0,1,5,8,14,17,18,23,24,25,26,28,29,31,32,33,34,36,43,45,47,51,53,54,57,60,61,62,63,64,65,66,67,71,72,74,75,78,79,80,81,91,95,96,97,99,102]
#cm.view_patches_bar(Yr,scipy.sparse.coo_matrix(A.tocsc()[:,sure_in_idx[idx_to_show]]),C[sure_in_idx[idx_to_show],:],b,f, dims[0],dims[1], YrA=YrA[sure_in_idx[idx_to_show],:],img=np.mean(Y,-1))
#%%
# idx_to_show=[0,1,5,8,14,17,18,23,24,25,26,28,29,31,32,33,34,36,43,45,47,51,53,54,57,60,61,62,63,64,65,66,67,71,72,74,75,78,79,80,81,91,95,96,97,99,102]
# idx_to_show=np.array(idx_to_show)[[2,19,23,26,34]]
#%%
import numpy as np
import caiman as cm
import scipy
with np.load('results_analysis.npz') as ld:
locals().update(ld)
A = scipy.sparse.coo_matrix(A)
with np.load('results_blobs.npz') as ld:
locals().update(ld)
m = cm.load(
'MOV_EXAMPLE_20160706154257___d1_150_d2_330_d3_1_order_C_frames_2300_.mmap')
Yr, dims, T = cm.load_memmap(
'MOV_EXAMPLE_20160706154257___d1_150_d2_330_d3_1_order_C_frames_2300_.mmap')
d1, d2 = dims
images = np.reshape(Yr.T, [T] + list(dims), order='F')
Y = np.reshape(Yr, dims + (T,), order='F')
#%%
ylimit = 100
pl.figure()
pl.subplot(3, 1, 1)
pl.imshow(np.mean(Y, -1), cmap='gray', vmin=10, vmax=60)
pl.ylim([0, ylimit])
pl.axis('off')
pl.subplot(3, 1, 2)
pl.imshow(np.mean(Y, -1), cmap='gray', vmin=10, vmax=60)
msk = np.reshape(A.tocsc()[:, sure_in_idx[7]].sum(-1), dims, order='F')
msk[msk < 0.01] = np.nan
pl.imshow(msk, cmap='Greens', alpha=.3)
msk = np.reshape(
A.tocsc()[:, sure_in_idx[idx_to_show]].sum(-1), dims, order='F')
msk[msk < 0.01] = np.nan
pl.ylim([0, ylimit])
pl.imshow(msk, cmap='hot', alpha=.3)
pl.axis('off')
pl.subplot(3, 1, 3)
pl.imshow(np.reshape(
A.tocsc()[:, sure_in_idx[idx_to_show]].mean(-1), dims, order='F'), cmap='hot')
pl.ylim([0, ylimit])
pl.axis('off')
font = {'family': 'Myriad Pro',
'weight': 'regular',
'size': 30}
pl.rc('font', **font)
#%
pl.figure()
counter = 0
for iid in sure_in_idx[np.hstack([idx_to_show, 7])]:
counter += 1
pl.subplot(7, 7, counter)
mmsk = np.reshape(A.tocsc()[:, iid].todense(), dims, order='F')
cx, cy = scipy.ndimage.measurements.center_of_mass(np.array(mmsk))
cx = np.int(cx)
cy = np.int(cy)
print((cx, cy))
pl.imshow(mmsk[np.maximum(cx - 15, 0):cx + 15,
np.maximum(cy - 15, 0):cy + 15], cmap='gray')
pl.ylim([0, 30])
pl.axis('off')
pl.title(np.hstack([idx_to_show, 7])[counter - 1])
font = {'family': 'Myriad Pro',
'weight': 'regular',
'size': 30}
pl.rc('font', **font)
#%
pl.figure()
m = np.array(Yr)
bckg_1 = b.dot(f)
nA = (A.power(2)).sum(0)
m = m - bckg_1
Y_r_sig = A.T.dot(m)
Y_r_sig = scipy.sparse.linalg.spsolve(
scipy.sparse.spdiags(np.sqrt(nA), 0, nA.size, nA.size), Y_r_sig)
Y_r_bl = A.T.dot(bckg_1)
Y_r_bl = scipy.sparse.linalg.spsolve(
scipy.sparse.spdiags(np.sqrt(nA), 0, nA.size, nA.size), Y_r_bl)
Y_r_bl = cm.mode_robust(Y_r_bl, 1)
trs = old_div(Y_r_sig, Y_r_bl[:, np.newaxis])
cb.trace(trs[np.hstack([sure_in_idx[idx_to_show], 7])].T, fr=6).plot()
# pl.figure()
# cb.trace(trs[sure_in_idx[7]].T,fr=6).plot()
# %%
# done '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160627105123/'
# errors: '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160623161504/',
# base_folders=[
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160627154015/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160624105838/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160625132042/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160626175708/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160627110747/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160628100247/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160705103903/',
#
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160628162522/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160629123648/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160630120544/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160701113525/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160702152950/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160703173620/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160704130454/',
# ]
# error: '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b35/20160711104450/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b35/20160712105933/',
# base_folders=[
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b35/20160710134627/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b35/20160710193544/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b35/20160711164154/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b35/20160711212316/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b35/20160712101950/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b35/20160712173043/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b35/20160713100916/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b35/20160713171246/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b35/20160714094320/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b35/20160714143248/'
# ]
# for base_folder in base_folders:
# img_descr=cb.utils.get_image_description_SI(glob(base_folder+'2016*.tif')[0])[0]
# f_rate=img_descr['scanimage.SI.hRoiManager.scanFrameRate']
# print f_rate
# #%%
# fls=glob(os.path.join(base_folder,'2016*.tif'))
# fls.sort()
# print fls
# # verify they are ordered
# #%%
# triggers_img,trigger_names_img=gc.extract_triggers(fls,read_dictionaries=False)
# np.savez(base_folder+'all_triggers.npz',triggers=triggers_img,trigger_names=trigger_names_img)
# #%% get information from eyelid traces
# t_start=time()
# camera_file=glob(os.path.join(base_folder,'*_cam2.h5'))
# assert len(camera_file)==1, 'there are none or two camera files'
# res_bt=gc.get_behavior_traces(camera_file[0],t0=0,t1=8.0,freq=60,ISI=.25,draw_rois=False,plot_traces=False,mov_filt_1d=True,window_lp=5)
# t_end=time()-t_start
# print t_end
# #%%
# np.savez(base_folder+'behavioral_traces.npz',**res_bt)
# #%%
# with np.load(base_folder+'behavioral_traces.npz') as ld:
# res_bt=dict(**ld)
# #%%
# pl.close()
# tm=res_bt['time']
# f_rate_bh=1/np.median(np.diff(tm))
# ISI=res_bt['trial_info'][0][3]-res_bt['trial_info'][0][2]
# eye_traces=np.array(res_bt['eyelid'])
# idx_CS_US=res_bt['idx_CS_US']
# idx_US=res_bt['idx_US']
# idx_CS=res_bt['idx_CS']
#
# idx_ALL=np.sort(np.hstack([idx_CS_US,idx_US,idx_CS]))
# eye_traces,amplitudes_at_US, trig_CRs=gc.process_eyelid_traces(eye_traces,tm,idx_CS_US,idx_US,idx_CS,thresh_CR=.15,time_CR_on=-.1,time_US_on=.05)
#
# idxCSUSCR = trig_CRs['idxCSUSCR']
# idxCSUSNOCR = trig_CRs['idxCSUSNOCR']
# idxCSCR = trig_CRs['idxCSCR']
# idxCSNOCR = trig_CRs['idxCSNOCR']
# idxNOCR = trig_CRs['idxNOCR']
# idxCR = trig_CRs['idxCR']
# idxUS = trig_CRs['idxUS']
# idxCSCSUS=np.concatenate([idx_CS,idx_CS_US])
#
#
# pl.plot(tm,np.mean(eye_traces[idxCSUSCR],0))
# pl.plot(tm,np.mean(eye_traces[idxCSUSNOCR],0))
# pl.plot(tm,np.mean(eye_traces[idxCSCR],0))
# pl.plot(tm,np.mean(eye_traces[idxCSNOCR],0))
# pl.plot(tm,np.mean(eye_traces[idx_US],0))
# pl.legend(['idxCSUSCR','idxCSUSNOCR','idxCSCR','idxCSNOCR','idxUS'])
# pl.xlabel('time to US (s)')
# pl.ylabel('eyelid closure')
# plt.axvspan(-ISI,ISI, color='g', alpha=0.2, lw=0)
# plt.axvspan(0,0.03, color='r', alpha=0.2, lw=0)
#
# pl.xlim([-.5,1])
# pl.savefig(base_folder+'behavioral_traces.pdf')
# #%%
# #pl.close()
# #bins=np.arange(0,1,.01)
# #pl.hist(amplitudes_at_US[idxCR],bins=bins)
# #pl.hist(amplitudes_at_US[idxNOCR],bins=bins)
# #pl.savefig(base_folder+'hist_behav.pdf')
#
#
# #%%
# pl.close()
# f_results= glob(base_folder+'*results_analysis.npz')
# f_results.sort()
# for rs in f_results:
# print rs
# #%% load results and put them in lists
# A_s,C_s,YrA_s, Cn_s, b_s, f_s, shape = gc.load_results(f_results)
# B_s, lab_imgs, cm_s = gc.threshold_components(A_s,shape, min_size=5,max_size=50,max_perc=.5)
# #%%
# if not batch_mode:
# for i,A_ in enumerate(B_s):
# sizes=np.array(A_.sum(0)).squeeze()
# pl.subplot(2,3,i+1)
# pl.imshow(np.reshape(A_.sum(1),shape,order='F'),cmap='gray',vmax=.5)
# #%% compute mask distances
# if len(B_s)>1:
# max_dist=30
# D_s=gc.distance_masks(B_s,cm_s,max_dist)
# np.savez(base_folder+'distance_masks.npz',D_s=D_s)
# #%%
# if not batch_mode:
# for ii,D in enumerate(D_s):
# pl.subplot(3,3,ii+1)
# pl.imshow(D,interpolation='None')
#
# #%% find matches
# matches,costs = gc.find_matches(D_s, print_assignment=False)
# #%%
# neurons=gc.link_neurons(matches,costs,max_cost=0.6,min_FOV_present=None)
# else:
# neurons=[np.arange(B_s[0].shape[-1])]
# #%%
# np.savez(base_folder+'neurons_matching.npz',matches=matches,costs=costs,neurons=neurons,D_s=D_s)
# #%%
# re_load = False
# if re_load:
# import calblitz as cb
# from calblitz.granule_cells import utils_granule as gc
# from glob import glob
# import numpy as np
# import os
# import scipy
# import pylab as pl
# import ca_source_extraction as cse
#
# if is_blob:
# with np.load(base_folder+'distance_masks.npz') as ld:
# D_s=ld['D_s']
# with np.load(base_folder+'neurons_matching.npz') as ld:
# locals().update(ld)
#
#
#
# with np.load(base_folder+'all_triggers.npz') as at:
# triggers_img=at['triggers']
# trigger_names_img=at['trigger_names']
#
# with np.load(base_folder+'behavioral_traces.npz') as ld:
# res_bt = dict(**ld)
# tm=res_bt['time']
# f_rate_bh=1/np.median(np.diff(tm))
# ISI=res_bt['trial_info'][0][3]-res_bt['trial_info'][0][2]
# eye_traces=np.array(res_bt['eyelid'])
# idx_CS_US=res_bt['idx_CS_US']
# idx_US=res_bt['idx_US']
# idx_CS=res_bt['idx_CS']
#
# idx_ALL=np.sort(np.hstack([idx_CS_US,idx_US,idx_CS]))
# eye_traces,amplitudes_at_US, trig_CRs=gc.process_eyelid_traces(eye_traces,tm,idx_CS_US,idx_US,idx_CS,thresh_CR=.15,time_CR_on=-.1,time_US_on=.05)
#
# idxCSUSCR = trig_CRs['idxCSUSCR']
# idxCSUSNOCR = trig_CRs['idxCSUSNOCR']
# idxCSCR = trig_CRs['idxCSCR']
# idxCSNOCR = trig_CRs['idxCSNOCR']
# idxNOCR = trig_CRs['idxNOCR']
# idxCR = trig_CRs['idxCR']
# idxUS = trig_CRs['idxUS']
# idxCSCSUS=np.concatenate([idx_CS,idx_CS_US])
#
#
# f_results= glob(base_folder+'*results_analysis.npz')
# f_results.sort()
# for rs in f_results:
# print rs
# print '*****'
# A_s,C_s,YrA_s, Cn_s, b_s, f_s, shape = gc.load_results(f_results)
# if is_blob:
# remove_unconnected_components=True
# else:
# remove_unconnected_components=False
#
# neurons=[]
# for xx in A_s:
# neurons.append(np.arange(A_s[0].shape[-1]))
#
# B_s, lab_imgs, cm_s = gc. threshold_components(A_s,shape, min_size=5,max_size=50,max_perc=.5,remove_unconnected_components=remove_unconnected_components)
# #%%
#
# row_cols=np.ceil(np.sqrt(len(A_s)))
# for idx,B in enumerate(A_s):
# pl.subplot(row_cols,row_cols,idx+1)
# pl.imshow(np.reshape(B[:,neurons[idx]].sum(1),shape,order='F'))
# pl.savefig(base_folder+'neuron_matches.pdf')
#
# #%%
# if not batch_mode:
# num_neurons=neurons[0].size
# for neuro in range(num_neurons):
# for idx,B in enumerate(A_s):
# pl.subplot(row_cols,row_cols,idx+1)
# pl.imshow(np.reshape(B[:,neurons[idx][neuro]].sum(1),shape,order='F'))
# pl.pause(.01)
# for idx,B in enumerate(A_s):
# pl.subplot(row_cols,row_cols,idx+1)
# pl.cla()
#
# #%%
# if 0:
# idx=0
# for row, column in zip(matches[idx][0],matches[idx][1]):
# value = D_s[idx][row,column]
# if value < .5:
# pl.cla()
# pl.imshow(np.reshape(B_s[idx][:,row].todense(),(512,512),order='F'),cmap='gray',interpolation='None')
# pl.imshow(np.reshape(B_s[idx+1][:,column].todense(),(512,512),order='F'),alpha=.5,cmap='hot',interpolation='None')
# if B_s[idx][:,row].T.dot(B_s[idx+1][:,column]).todense() == 0:
# print 'Flaw'
# pl.pause(.3)
#
# #%%
# tmpl_name=glob(base_folder+'*template_total.npz')[0]
# print tmpl_name
# with np.load(tmpl_name) as ld:
# mov_names_each=ld['movie_names']
#
#
# traces=[]
# traces_BL=[]
# traces_DFF=[]
# all_chunk_sizes=[]
#
# for idx, mov_names in enumerate(mov_names_each):
# idx=0
# A=A_s[idx][:,neurons[idx]]
# # C=C_s[idx][neurons[idx]]
# # YrA=YrA_s[idx][neurons[idx]]
# b=b_s[idx]
# f=f_s[idx]
# chunk_sizes=[]
# for mv in mov_names:
# base_name=os.path.splitext(os.path.split(mv)[-1])[0]
# with np.load(base_folder+base_name+'.npz') as ld:
# TT=len(ld['shifts'])
# chunk_sizes.append(TT)
#
#
# all_chunk_sizes.append(chunk_sizes)
#
# traces_,traces_DFF_,traces_BL_ = gc.generate_linked_traces(mov_names,chunk_sizes,A,b,f)
# traces=traces+traces_
# traces_DFF=traces_DFF+traces_DFF_
# traces_BL=traces_BL+traces_BL_
#
# #%%
# import pickle
# with open(base_folder+'traces.pk','w') as f:
# pickle.dump(dict(traces=traces,traces_BL=traces_BL,traces_DFF=traces_DFF),f)
#
# #%%
# if not batch_mode:
# with open(base_folder+'traces.pk','r') as f:
# locals().update(pickle.load(f) )
# #%%
# chunk_sizes=[]
# for idx,mvs in enumerate(mov_names_each):
# print idx
# for mv in mvs:
# base_name=os.path.splitext(os.path.split(mv)[-1])[0]
# with np.load(os.path.join(base_folder,base_name+'.npz')) as ld:
# TT=len(ld['shifts'])
# chunk_sizes.append(TT)
#
#
# min_chunk=np.min(chunk_sizes)
# max_chunk=np.max(chunk_sizes)
# num_chunks=np.sum(chunk_sizes)
# #%%
# import copy
# Ftraces=copy.deepcopy(traces_DFF[:])
#
# #%%
#
# #%%
# interpolate=False
# CS_ALONE=0
# US_ALONE= 1
# CS_US=2
#
# samples_before=np.int(2.8*f_rate)
# samples_after=np.int(7.3*f_rate)-samples_before
#
#
# if interpolate:
# Ftraces_mat=np.zeros([len(chunk_sizes),len(traces[0]),max_chunk])
# abs_frames=np.arange(max_chunk)
# else:
# Ftraces_mat=np.zeros([len(chunk_sizes),len(traces[0]),samples_after+samples_before])
#
# crs=idxCR
# nocrs=idxNOCR
# uss=idxUS
#
# triggers_img=np.array(triggers_img)
#
# idx_trig_CS=triggers_img[:][:,0]
# idx_trig_US=triggers_img[:][:,1]
# trial_type=triggers_img[:][:,2]
# length=triggers_img[:][:,-1]
# ISI=np.int(np.nanmedian(idx_trig_US)-np.nanmedian(idx_trig_CS))
#
# for idx,fr in enumerate(chunk_sizes):
#
# print idx
#
# if interpolate:
#
# if fr!=max_chunk:
#
# f1=scipy.interpolate.interp1d(np.arange(fr) , Ftraces[idx] ,axis=1, bounds_error=False, kind='linear')
# Ftraces_mat[idx]=np.array(f1(abs_frames))
#
# else:
#
# Ftraces_mat[idx]=Ftraces[idx][:,trigs_US-samples_before]
#
#
# else:
#
# if trial_type[idx] == CS_ALONE:
# Ftraces_mat[idx]=Ftraces[idx][:,np.int(idx_trig_CS[idx]+ISI-samples_before):np.int(idx_trig_CS[idx]+ISI+samples_after)]
# else:
# Ftraces_mat[idx]=Ftraces[idx][:,np.int(idx_trig_US[idx]-samples_before):np.int(idx_trig_US[idx]+samples_after)]
#
# #%%
# wheel_traces, movement_at_CS, trigs_mov = gc.process_wheel_traces(np.array(res_bt['wheel']),tm,thresh_MOV_iqr=1000,time_CS_on=-.25,time_US_on=0)
# print trigs_mov
# mn_idx_CS_US=np.intersect1d(idx_CS_US,trigs_mov['idxNO_MOV'])
# nm_idx_US=np.intersect1d(idx_US,trigs_mov['idxNO_MOV'])
# nm_idx_CS=np.intersect1d(idx_CS,trigs_mov['idxNO_MOV'])
# nm_idxCSUSCR = np.intersect1d(idxCSUSCR,trigs_mov['idxNO_MOV'])
# nm_idxCSUSNOCR = np.intersect1d(idxCSUSNOCR,trigs_mov['idxNO_MOV'])
# nm_idxCSCR = np.intersect1d(idxCSCR,trigs_mov['idxNO_MOV'])
# nm_idxCSNOCR = np.intersect1d(idxCSNOCR,trigs_mov['idxNO_MOV'])
# nm_idxNOCR = np.intersect1d(idxNOCR,trigs_mov['idxNO_MOV'])
# nm_idxCR = np.intersect1d(idxCR,trigs_mov['idxNO_MOV'])
# nm_idxUS = np.intersect1d(idxUS,trigs_mov['idxNO_MOV'])
# nm_idxCSCSUS=np.intersect1d(idxCSCSUS,trigs_mov['idxNO_MOV'])
# #%%
# threshold_responsiveness=0.1
# ftraces=Ftraces_mat.copy()
# ftraces=ftraces-np.median(ftraces[:,:,:samples_before-ISI],axis=(2))[:,:,np.newaxis]
# amplitudes_responses=np.mean(ftraces[:,:,samples_before+ISI-1:samples_before+ISI+1],-1)
# cell_responsiveness=np.median(amplitudes_responses[nm_idxCSCSUS],axis=0)
# fraction_responsive=len(np.where(cell_responsiveness>threshold_responsiveness)[0])*1./np.shape(ftraces)[1]
# print fraction_responsive
# ftraces=ftraces[:,cell_responsiveness>threshold_responsiveness,:]
# amplitudes_responses=np.mean(ftraces[:,:,samples_before+ISI-1:samples_before+ISI+1],-1)
# #%%
# np.savez('ftraces.npz',ftraces=ftraces,samples_before=samples_before,samples_after=samples_after,ISI=ISI)
#
#
# #%%pl.close()
# pl.close()
# t=np.arange(-samples_before,samples_after)/f_rate
# pl.plot(t,np.median(ftraces[nm_idxCR],axis=(0,1)),'-*')
# pl.plot(t,np.median(ftraces[nm_idxNOCR],axis=(0,1)),'-d')
# pl.plot(t,np.median(ftraces[nm_idxUS],axis=(0,1)),'-o')
# plt.axvspan((-ISI)/f_rate, 0, color='g', alpha=0.2, lw=0)
# plt.axvspan(0, 0.03, color='r', alpha=0.5, lw=0)
# pl.xlabel('Time to US (s)')
# pl.ylabel('DF/F')
# pl.xlim([-.5, 1])
# pl.legend(['CR+','CR-','US'])
# pl.savefig(base_folder+'eyelid_resp_by_trial.pdf')
#
# #%%
# if not batch_mode:
# pl.close()
# for cell in range(ftraces.shape[1]):
# # pl.cla()
# pl.subplot(11,10,cell+1)
# print cell
# tr_cr=np.median(ftraces[crs,cell,:],axis=(0))
# tr_nocr=np.median(ftraces[nocrs,cell,:],axis=(0))
# tr_us=np.median(ftraces[uss,cell,:],axis=(0))
# pl.imshow(ftraces[np.concatenate([uss,nocrs,crs]),cell,:],aspect='auto',vmin=0,vmax=1)
# pl.xlim([samples_before-10,samples_before+10])
# pl.axis('off')
# # pl.plot(tr_cr,'b')
# # pl.plot(tr_nocr,'g')
# # pl.plot(tr_us,'r')
# # pl.legend(['CR+','CR-','US'])
# # pl.pause(1)
# #%%
# import pandas
#
# bins=np.arange(-.1,.3,.05)
# n_bins=6
# dfs=[];
# dfs_random=[];
# x_name='ampl_eye'
# y_name='ampl_fl'
# for resps in amplitudes_responses.T:
# idx_order=np.arange(len(idxCSCSUS))
# dfs.append(pandas.DataFrame(
# {y_name: resps[idxCSCSUS[idx_order]],
# x_name: amplitudes_at_US[idxCSCSUS]}))
#
# idx_order=np.random.permutation(idx_order)
# dfs_random.append(pandas.DataFrame(
# {y_name: resps[idxCSCSUS[idx_order]],
# x_name: amplitudes_at_US[idxCSCSUS]}))
#
#
# r_s=[]
# r_ss=[]
#
# for df,dfr in zip(dfs,dfs_random): # random scramble
#
# if bins is None:
# [_,bins]=np.histogram(dfr.ampl_eye,n_bins)
# groups = dfr.groupby(np.digitize(dfr.ampl_eye, bins))
# grouped_mean = groups.mean()
# grouped_sem = groups.sem()
# (r,p_val)=scipy.stats.pearsonr(grouped_mean.ampl_eye,grouped_mean.ampl_fl)
# # r=np.corrcoef(grouped_mean.ampl_eye,grouped_mean.ampl_fl)[0,1]
#
# r_ss.append(r)
#
# if bins is None:
# [_,bins]=np.histogram(df.ampl_eye,n_bins)
#
# groups = df.groupby(np.digitize(df.ampl_eye, bins))
# grouped_mean = groups.mean()
# grouped_sem= groups.sem()
# (r,p_val)=scipy.stats.pearsonr(grouped_mean.ampl_eye,grouped_mean.ampl_fl)
# # r=np.corrcoef(grouped_mean.ampl_eye,grouped_mean.ampl_fl)[0,1]
# r_s.append(r)
# if r_s[-1]>.86:
# pl.subplot(1,2,1)
# print 'found'
# pl.errorbar(grouped_mean.ampl_eye,grouped_mean.ampl_fl,grouped_sem.ampl_fl.as_matrix(),grouped_sem.ampl_eye.as_matrix(),fmt='.')
# pl.scatter(grouped_mean.ampl_eye,grouped_mean.ampl_fl,s=groups.apply(len).values*3)#
# pl.xlabel(x_name)
# pl.ylabel(y_name)
#
# mu_scr=np.mean(r_ss)
#
# std_scr=np.std(r_ss)
# [a,b]=np.histogram(r_s,20)
#
# pl.subplot(1,2,2)
# pl.plot(b[1:],scipy.signal.savgol_filter(a,3,1))
# plt.axvspan(mu_scr-std_scr, mu_scr+std_scr, color='r', alpha=0.2, lw=0)
# pl.xlabel('correlation coefficients')
# pl.ylabel('bin counts')
# pl.savefig(base_folder+'correlations.pdf')
#
#
#
# #%%
# if not batch_mode:
# r_s=[]
# for resps in amplitudes_responses.T:
# r=np.corrcoef(amplitudes_at_US[idxCSCSUS],resps[idxCSCSUS])[0,1]
# # if r>.25:
# # pl.scatter(amplitudes_at_US[idxCSCSUS],resps[idxCSCSUS])
# # bins=np.arange(-.3,1.5,.2)
# # a,b=np.histogram(resps,bins)
# # new_dat=[]
# # for bb in a:
# #
# r_s.append(r)
# pl.xlabel('Amplitudes CR')
# pl.ylabel('Amplitudes GC responses')
#
# pl.hist(r_s)
#
# %%
###
# base_name='20160518133747_'
# cam1=base_name+'cam1.h5'
# cam2=base_name+'cam2.h5'
# meta_inf=base_name+'data.h5'
###
# mtot=[]
# eye_traces=[]
# tims=[]
# trial_info=[]
###
# with h5py.File(cam2) as f:
###
# with h5py.File(meta_inf) as dt:
###
# rois=np.asarray(dt['roi'],np.float32)
###
### trials = f.keys()
# trials.sort(key=lambda(x): np.int(x.replace('trial_','')))
### trials_idx=[np.int(x.replace('trial_',''))-1 for x in trials]
###
###
###
###
# for tr,idx_tr in zip(trials,trials_idx):
###
# print tr
###
# trial=f[tr]
###
# mov=np.asarray(trial['mov'])
###
# if 0:
###
# pl.imshow(np.mean(mov,0))
# pts=pl.ginput(-1)
### pts = np.asarray(pts, dtype=np.int32)
### data = np.zeros(np.shape(mov)[1:], dtype=np.int32)
# if CV_VERSION == 2:
# lt = cv2.CV_AA
# elif CV_VERSION == 3:
### lt = cv2.LINE_AA
### cv2.fillConvexPoly(data, pts, (1,1,1), lineType=lt)
# rois[0]=data
# eye_trace=np.mean(mov*rois[0],axis=(1,2))
# mov_trace=np.mean((np.diff(np.asarray(mov,dtype=np.float32),axis=0)**2)*rois[1],axis=(1,2))
# mov=np.transpose(mov,[0,2,1])
###
# mov=mov[:,:,::-1]
###
# if mov.shape[0]>0:
# ts=np.array(trial['ts'])
# if np.size(ts)>0:
# print (ts[-1,0]-ts[0,0])
# new_ts=np.linspace(0,ts[-1,0]-ts[0,0],np.shape(mov)[0])
###
# print 1/np.mean(np.diff(new_ts))
# tims.append(new_ts)
###
# mov=cb.movie(mov*rois[0][::-1].T,fr=1/np.mean(np.diff(new_ts)))
# x_max,y_max=np.max(np.nonzero(np.max(mov,0)),1)
# x_min,y_min=np.min(np.nonzero(np.max(mov,0)),1)
# mov=mov[:,x_min:x_max,y_min:y_max]
### mov=np.mean(mov, axis=(1,2))
###
# if mov.ndim == 3:
# window_hp=(177,1,1)
# window_lp=(7,1,1)
# bl=signal.medfilt(mov,window_hp)
# mov=signal.medfilt(mov-bl,window_lp)
###
# else:
# window_hp=201
# window_lp=3
# bl=signal.medfilt(mov,window_hp)
# bl=cm.mode_robust(mov)
# mov=signal.medfilt(mov-bl,window_lp)
###
###
# if mov.ndim == 3:
### eye_traces.append(np.mean(mov, axis=(1,2)))
# else:
# eye_traces.append(mov)
###
# mtot.append(mov)
# trial_info.append(dt['trials'][idx_tr,:])
# cb.movie(mov,fr=1/np.mean(np.diff(new_ts)))
##
# %%
# %%
# sub_trig_img=downsample_triggers(triggers_img.copy(),fraction_downsample=.3)
# %%
# if num_frames_movie != triggers[-1,-1]:
## raise Exception('Triggers values do not match!')
##
# %%
# fnames=[]
# sub_trig_names=trigger_names[39:95].copy()
# sub_trig=triggers[39:95].copy().T
# for a,b in zip(sub_trig_names,sub_trig):
# fnames.append(a+'.hdf5')
###
# fraction_downsample=.333333333333333333333; # useful to downsample the movie across time. fraction_downsample=.1 means downsampling by a factor of 10
# sub_trig[:2]=np.round(sub_trig[:2]*fraction_downsample)
# sub_trig[-1]=np.floor(sub_trig[-1]*fraction_downsample)
# sub_trig[-1]=np.cumsum(sub_trig[-1])
# fname_new=cm.save_memmap(fnames,base_name='Yr',resize_fact=(1,1,fraction_downsample),remove_init=0,idx_xy=(slice(90,-10,None),slice(30,-120,None)))
# %%
# m=cb.load(fname_new,fr=30*fraction_downsample)
# T,d1,d2=np.shape(m)
# %%
# if T != sub_trig[-1,-1]:
### raise Exception('Triggers values do not match!')
# %% how to take triggered aligned movie
# wvf=mmm.take(trg)
# %%
# newm=m.take(trg,axis=0)
# newm=newm.mean(axis=1)
# %%
# (newm-np.mean(newm,0)).play(backend='opencv',fr=3,gain=2.,magnification=1,do_loop=True)
# %%v
# Yr,d1,d2,T=cm.load_memmap(fname_new)
# d,T=np.shape(Yr)
# Y=np.reshape(Yr,(d1,d2,T),order='F') # 3D version of the movie
##
# %%
##
# pl.plot(np.nanmedian(np.array(eye_traces).T,1))
##
# %%
##mov = np.concatenate(mtot,axis=0)
# m1=cb.movie(mov,fr=1/np.mean(np.diff(new_ts)))
# x_max,y_max=np.max(np.nonzero(np.max(m,0)),1)
# x_min,y_min=np.min(np.nonzero(np.max(m,0)),1)
# m1=m[:,x_min:x_max,y_min:y_max]
# %% filters
##b, a = signal.butter(8, [.05, .5] ,'bandpass')
# pl.plot(np.mean(m1,(1,2))-80)
# pl.plot(signal.lfilter(b,a,np.mean(m1,(1,2))),linewidth=2)
# %%
# m1.play(backend='opencv',gain=1.,fr=f_rate,magnification=3)
# %% NMF
##comps, tim,_=cb.behavior.extract_components(np.maximum(0,m1-np.min(m1,0)),n_components=4,init='nndsvd',l1_ratio=1,alpha=0,max_iter=200,verbose=True)
# pl.plot(np.squeeze(np.array(tim)).T)
# %% ICA
##from sklearn.decomposition import FastICA
# fica=FastICA(n_components=3,whiten=True,max_iter=200,tol=1e-6)
# X=fica.fit_transform(np.reshape(m1,(m1.shape[0],m1.shape[1]*m1.shape[2]),order='F').T,)
# pl.plot(X)
# %%
# for count,c in enumerate(comps):
# pl.subplot(2,3,count+1)
# pl.imshow(c)
##
# %%
# md=cm.mode_robust(m1,0)
# mm1=m1*(m1<md)
# rob_std=np.sum(mm1**2,0)/np.sum(mm1>0,0)
# rob_std[np.isnan(rob_std)]=0
# mm2=m1*(m1>(md+rob_std))
# %%
##
##dt = h5py.File('20160423165229_data.h5')
# sync for software
# np.array(dt['sync'])
# dt['sync'].attrs['keys']
# dt['trials']
# dt['trials'].attrs
# dt['trials'].attrs['keys']
# you need to apply the sync here on dt['sync'], like:
##us_time_cam1=np.asarray(dt['trials'])[:,3] - np.array(dt['sync'])[1]
# main is used as the true time stamp, and you can adjust the value with respect to main sync value
# np.array(dt['sync']) # these are the values read on a unique clock from the three threads
# %%
##from skimage.external import tifffile
##
# tf=tifffile.TiffFile('20160423165229_00001_00001.tif')
# imd=tf.pages[0].tags['image_description'].value
# for pag in tf.pages:
# imd=pag.tags['image_description'].value
# i2cd=si_parse(imd)['I2CData']
## print (i2cd)
# %%
# with h5py.File('20160705103903_cam2.h5') as f1:
# for k in f1.keys()[:1]:
### m = np.array(f1[k]['mov'])
###
###
# pl.imshow(np.mean(m,0),cmap='gray')
# %%
# with h5py.File('20160705103903_data.h5') as f1:
# print f1.keys()
### rois= np.array(f1['roi'])
# %%
# with h5py.File('20160705103903_cam2.h5') as f1:
# for k in f1.keys()[:1]:
### m = np.array(f1[k]['mov'])
###
###
# pl.imshow(np.mean(m,0),cmap='gray')
# pl.imshow(rois[0],alpha=.3)
# pl.imshow(rois[1],alpha=.3)
###
|
gpl-2.0
|
sinkpoint/dipy
|
dipy/data/__init__.py
|
2
|
12450
|
"""
Read test or example data
"""
from __future__ import division, print_function, absolute_import
import sys
import json
from nibabel import load
from dipy.io.bvectxt import read_bvec_file
from os.path import join as pjoin, dirname
if sys.version_info[0] < 3:
import cPickle
def loads_compat(bytes):
return cPickle.loads(bytes)
else: # Python 3
import pickle
# Need to load pickles saved in Python 2
def loads_compat(bytes):
return pickle.loads(bytes, encoding='latin1')
import gzip
import numpy as np
from dipy.core.gradients import GradientTable, gradient_table
from dipy.core.sphere import Sphere, HemiSphere
from dipy.sims.voxel import SticksAndBall
import numpy as np
from dipy.data.fetcher import (fetch_scil_b0,
read_scil_b0,
fetch_stanford_hardi,
read_stanford_hardi,
fetch_taiwan_ntu_dsi,
read_taiwan_ntu_dsi,
fetch_sherbrooke_3shell,
read_sherbrooke_3shell,
fetch_isbi2013_2shell,
read_isbi2013_2shell,
read_stanford_labels,
fetch_syn_data,
read_syn_data,
fetch_stanford_t1,
read_stanford_t1,
fetch_stanford_pve_maps,
read_stanford_pve_maps,
fetch_cenir_multib,
read_cenir_multib,
fetch_mni_template,
read_mni_template)
from ..utils.arrfuncs import as_native_array
from dipy.tracking.streamline import relist_streamlines
THIS_DIR = dirname(__file__)
SPHERE_FILES = {
'symmetric362': pjoin(THIS_DIR, 'evenly_distributed_sphere_362.npz'),
'symmetric642': pjoin(THIS_DIR, 'evenly_distributed_sphere_642.npz'),
'symmetric724': pjoin(THIS_DIR, 'evenly_distributed_sphere_724.npz'),
'repulsion724': pjoin(THIS_DIR, 'repulsion724.npz'),
'repulsion100': pjoin(THIS_DIR, 'repulsion100.npz')
}
class DataError(Exception):
pass
def get_sim_voxels(name='fib1'):
""" provide some simulated voxel data
Parameters
------------
name : str, which file?
'fib0', 'fib1' or 'fib2'
Returns
---------
dix : dictionary, where dix['data'] returns a 2d array
where every row is a simulated voxel with different orientation
Examples
----------
>>> from dipy.data import get_sim_voxels
>>> sv=get_sim_voxels('fib1')
>>> sv['data'].shape
(100, 102)
>>> sv['fibres']
'1'
>>> sv['gradients'].shape
(102, 3)
>>> sv['bvals'].shape
(102,)
>>> sv['snr']
'60'
>>> sv2=get_sim_voxels('fib2')
>>> sv2['fibres']
'2'
>>> sv2['snr']
'80'
Notes
-------
These sim voxels were provided by M.M. Correia using Rician noise.
"""
if name == 'fib0':
fname = pjoin(THIS_DIR, 'fib0.pkl.gz')
if name == 'fib1':
fname = pjoin(THIS_DIR, 'fib1.pkl.gz')
if name == 'fib2':
fname = pjoin(THIS_DIR, 'fib2.pkl.gz')
return loads_compat(gzip.open(fname, 'rb').read())
def get_skeleton(name='C1'):
""" provide skeletons generated from Local Skeleton Clustering (LSC)
Parameters
-----------
name : str, 'C1' or 'C3'
Returns
-------
dix : dictionary
Examples
---------
>>> from dipy.data import get_skeleton
>>> C=get_skeleton('C1')
>>> len(C.keys())
117
>>> for c in C: break
>>> sorted(C[c].keys())
['N', 'hidden', 'indices', 'most']
"""
if name == 'C1':
fname = pjoin(THIS_DIR, 'C1.pkl.gz')
if name == 'C3':
fname = pjoin(THIS_DIR, 'C3.pkl.gz')
return loads_compat(gzip.open(fname, 'rb').read())
def get_sphere(name='symmetric362'):
''' provide triangulated spheres
Parameters
------------
name : str
which sphere - one of:
* 'symmetric362'
* 'symmetric642'
* 'symmetric724'
* 'repulsion724'
* 'repulsion100'
Returns
-------
sphere : a dipy.core.sphere.Sphere class instance
Examples
--------
>>> import numpy as np
>>> from dipy.data import get_sphere
>>> sphere = get_sphere('symmetric362')
>>> verts, faces = sphere.vertices, sphere.faces
>>> verts.shape
(362, 3)
>>> faces.shape
(720, 3)
>>> verts, faces = get_sphere('not a sphere name') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
DataError: No sphere called "not a sphere name"
'''
fname = SPHERE_FILES.get(name)
if fname is None:
raise DataError('No sphere called "%s"' % name)
res = np.load(fname)
# Set to native byte order to avoid errors in compiled routines for
# big-endian platforms, when using these spheres.
return Sphere(xyz=as_native_array(res['vertices']),
faces=as_native_array(res['faces']))
default_sphere = HemiSphere.from_sphere(get_sphere('symmetric724'))
small_sphere = HemiSphere.from_sphere(get_sphere('symmetric362'))
def get_data(name='small_64D'):
""" provides filenames of some test datasets or other useful parametrisations
Parameters
----------
name : str
the filename/s of which dataset to return, one of:
'small_64D' small region of interest nifti,bvecs,bvals 64 directions
'small_101D' small region of interest nifti,bvecs,bvals 101 directions
'aniso_vox' volume with anisotropic voxel size as Nifti
'fornix' 300 tracks in Trackvis format (from Pittsburgh Brain Competition)
        'gqi_vectors' the scanner wave vectors needed for a GQI acquisition of 101 directions tested on a Siemens 3T Trio
'small_25' small ROI (10x8x2) DTI data (b value 2000, 25 directions)
'test_piesno' slice of N=8, K=14 diffusion data
'reg_c' small 2D image used for validating registration
        'reg_o' small 2D image used for validating registration
'cb_2' two vectorized cingulum bundles
Returns
-------
fnames : tuple
filenames for dataset
Examples
----------
>>> import numpy as np
>>> from dipy.data import get_data
>>> fimg,fbvals,fbvecs=get_data('small_101D')
>>> bvals=np.loadtxt(fbvals)
>>> bvecs=np.loadtxt(fbvecs).T
>>> import nibabel as nib
>>> img=nib.load(fimg)
>>> data=img.get_data()
>>> data.shape
(6, 10, 10, 102)
>>> bvals.shape
(102,)
>>> bvecs.shape
(102, 3)
"""
if name == 'small_64D':
fbvals = pjoin(THIS_DIR, 'small_64D.bvals.npy')
fbvecs = pjoin(THIS_DIR, 'small_64D.gradients.npy')
fimg = pjoin(THIS_DIR, 'small_64D.nii')
return fimg, fbvals, fbvecs
if name == '55dir_grad.bvec':
return pjoin(THIS_DIR, '55dir_grad.bvec')
if name == 'small_101D':
fbvals = pjoin(THIS_DIR, 'small_101D.bval')
fbvecs = pjoin(THIS_DIR, 'small_101D.bvec')
fimg = pjoin(THIS_DIR, 'small_101D.nii.gz')
return fimg, fbvals, fbvecs
if name == 'aniso_vox':
return pjoin(THIS_DIR, 'aniso_vox.nii.gz')
if name == 'fornix':
return pjoin(THIS_DIR, 'tracks300.trk')
if name == 'gqi_vectors':
return pjoin(THIS_DIR, 'ScannerVectors_GQI101.txt')
if name == 'dsi515btable':
return pjoin(THIS_DIR, 'dsi515_b_table.txt')
if name == 'dsi4169btable':
return pjoin(THIS_DIR, 'dsi4169_b_table.txt')
if name == 'grad514':
return pjoin(THIS_DIR, 'grad_514.txt')
if name == "small_25":
fbvals = pjoin(THIS_DIR, 'small_25.bval')
fbvecs = pjoin(THIS_DIR, 'small_25.bvec')
fimg = pjoin(THIS_DIR, 'small_25.nii.gz')
return fimg, fbvals, fbvecs
if name == "S0_10":
fimg = pjoin(THIS_DIR, 'S0_10slices.nii.gz')
return fimg
if name == "test_piesno":
fimg = pjoin(THIS_DIR, 'test_piesno.nii.gz')
return fimg
if name == "reg_c":
return pjoin(THIS_DIR, 'C.npy')
if name == "reg_o":
return pjoin(THIS_DIR, 'circle.npy')
if name == 'cb_2':
return pjoin(THIS_DIR, 'cb_2.npz')
if name == "t1_coronal_slice":
return pjoin(THIS_DIR, 't1_coronal_slice.npy')
def _gradient_from_file(filename):
"""Reads a gradient file saved as a text file compatible with np.loadtxt
and saved in the dipy data directory"""
def gtab_getter():
gradfile = pjoin(THIS_DIR, filename)
grad = np.loadtxt(gradfile, delimiter=',')
gtab = GradientTable(grad)
return gtab
return gtab_getter
get_3shell_gtab = _gradient_from_file("gtab_3shell.txt")
get_isbi2013_2shell_gtab = _gradient_from_file("gtab_isbi2013_2shell.txt")
get_gtab_taiwan_dsi = _gradient_from_file("gtab_taiwan_dsi.txt")
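# Added note: each assignment above builds a zero-argument getter; calling it loads
# the named text file from this directory and wraps it in a GradientTable, e.g.
#     gtab = get_3shell_gtab()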
def dsi_voxels():
fimg, fbvals, fbvecs = get_data('small_101D')
bvals = np.loadtxt(fbvals)
bvecs = np.loadtxt(fbvecs).T
img = load(fimg)
data = img.get_data()
gtab = gradient_table(bvals, bvecs)
return data, gtab
def dsi_deconv_voxels():
gtab = gradient_table(np.loadtxt(get_data('dsi515btable')))
data = np.zeros((2, 2, 2, 515))
for ix in range(2):
for iy in range(2):
for iz in range(2):
data[ix, iy, iz], dirs = SticksAndBall(gtab,
d=0.0015,
S0=100,
angles=[(0, 0), (90, 0)],
fractions=[50, 50],
snr=None)
return data, gtab
def mrtrix_spherical_functions():
"""Spherical functions represented by spherical harmonic coefficients and
evaluated on a discrete sphere.
Returns
-------
func_coef : array (2, 3, 4, 45)
Functions represented by the coefficients associated with the
        mrtrix spherical harmonic basis of order 8.
func_discrete : array (2, 3, 4, 81)
Functions evaluated on `sphere`.
sphere : Sphere
The discrete sphere, points on the surface of a unit sphere, used to
evaluate the functions.
Notes
-----
These coefficients were obtained by using the dwi2SH command of mrtrix.
"""
func_discrete = load(pjoin(THIS_DIR, "func_discrete.nii.gz")).get_data()
func_coef = load(pjoin(THIS_DIR, "func_coef.nii.gz")).get_data()
gradients = np.loadtxt(pjoin(THIS_DIR, "sphere_grad.txt"))
# gradients[0] and the first volume of func_discrete,
# func_discrete[..., 0], are associated with the b=0 signal.
# gradients[:, 3] are the b-values for each gradient/volume.
sphere = Sphere(xyz=gradients[1:, :3])
return func_coef, func_discrete[..., 1:], sphere
dipy_cmaps = None
def get_cmap(name):
"""Makes a callable, similar to maptlotlib.pyplot.get_cmap"""
global dipy_cmaps
if dipy_cmaps is None:
filename = pjoin(THIS_DIR, "dipy_colormaps.json")
with open(filename) as f:
dipy_cmaps = json.load(f)
desc = dipy_cmaps.get(name)
if desc is None:
return None
def simple_cmap(v):
"""Emulates matplotlib colormap callable"""
rgba = np.ones((len(v), 4))
for i, color in enumerate(('red', 'green', 'blue')):
x, y0, y1 = zip(*desc[color])
# Matplotlib allows more complex colormaps, but for users who do
# not have Matplotlib dipy makes a few simple colormaps available.
            # These colormaps are simple because y0 == y1, and therefore we
# ignore y1 here.
rgba[:, i] = np.interp(v, x, y0)
return rgba
return simple_cmap
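# Added note (illustrative only; 'blues' is a hypothetical colormap name): the
# returned callable maps scalars in [0, 1] to RGBA rows, mirroring matplotlib.
#     cmap = get_cmap('blues')           # None if the name is not in the JSON file
#     rgba = cmap(np.linspace(0, 1, 5))  # array of shape (5, 4)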
def two_cingulum_bundles():
fname = get_data('cb_2')
res = np.load(fname)
cb1 = relist_streamlines(res['points'], res['offsets'])
cb2 = relist_streamlines(res['points2'], res['offsets2'])
return cb1, cb2
def matlab_life_results():
matlab_rmse = np.load(pjoin(THIS_DIR, 'life_matlab_rmse.npy'))
matlab_weights = np.load(pjoin(THIS_DIR, 'life_matlab_weights.npy'))
return matlab_rmse, matlab_weights
|
bsd-3-clause
|
hainm/scikit-learn
|
examples/ensemble/plot_partial_dependence.py
|
249
|
4456
|
"""
========================
Partial Dependence Plots
========================
Partial dependence plots show the dependence between the target function [1]_
and a set of 'target' features, marginalizing over the
values of all other features (the complement features). Due to the limits
of human perception the size of the target feature set must be small (usually,
one or two) thus the target features are usually chosen among the most
important features
(see :attr:`~sklearn.ensemble.GradientBoostingRegressor.feature_importances_`).
This example shows how to obtain partial dependence plots from a
:class:`~sklearn.ensemble.GradientBoostingRegressor` trained on the California
housing dataset. The example is taken from [HTF2009]_.
The plot shows four one-way and one two-way partial dependence plots.
The target variables for the one-way PDP are:
median income (`MedInc`), avg. occupants per household (`AvgOccup`),
median house age (`HouseAge`), and avg. rooms per household (`AveRooms`).
We can clearly see that the median house price shows a linear relationship
with the median income (top left) and that the house price drops when the
avg. occupants per household increases (top middle).
The top right plot shows that the house age in a district does not have
a strong influence on the (median) house price; nor does the average number
of rooms per household.
The tick marks on the x-axis represent the deciles of the feature values
in the training data.
Partial dependence plots with two target features enable us to visualize
interactions among them. The two-way partial dependence plot shows the
dependence of median house price on joint values of house age and avg.
occupants per household. We can clearly see an interaction between the
two features:
For an avg. occupancy greater than two, the house price is nearly independent
of the house age, whereas for values less than two there is a strong dependence
on age.
.. [HTF2009] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning Ed. 2", Springer, 2009.
.. [1] For classification you can think of it as the regression score before
the link function.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.datasets.california_housing import fetch_california_housing
# fetch California housing dataset
cal_housing = fetch_california_housing()
# split 80/20 train-test
X_train, X_test, y_train, y_test = train_test_split(cal_housing.data,
cal_housing.target,
test_size=0.2,
random_state=1)
names = cal_housing.feature_names
print('_' * 80)
print("Training GBRT...")
clf = GradientBoostingRegressor(n_estimators=100, max_depth=4,
learning_rate=0.1, loss='huber',
random_state=1)
clf.fit(X_train, y_train)
print("done.")
print('_' * 80)
print('Convenience plot with ``partial_dependence_plots``')
print()
features = [0, 5, 1, 2, (5, 1)]
fig, axs = plot_partial_dependence(clf, X_train, features, feature_names=names,
n_jobs=3, grid_resolution=50)
fig.suptitle('Partial dependence of house value on nonlocation features\n'
'for the California housing dataset')
plt.subplots_adjust(top=0.9) # tight_layout causes overlap with suptitle
print('_' * 80)
print('Custom 3d plot via ``partial_dependence``')
print()
fig = plt.figure()
target_feature = (1, 5)
pdp, (x_axis, y_axis) = partial_dependence(clf, target_feature,
X=X_train, grid_resolution=50)
XX, YY = np.meshgrid(x_axis, y_axis)
Z = pdp.T.reshape(XX.shape).T
ax = Axes3D(fig)
surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu)
ax.set_xlabel(names[target_feature[0]])
ax.set_ylabel(names[target_feature[1]])
ax.set_zlabel('Partial dependence')
# pretty init view
ax.view_init(elev=22, azim=122)
plt.colorbar(surf)
plt.suptitle('Partial dependence of house value on median age and '
'average occupancy')
plt.subplots_adjust(top=0.9)
plt.show()
|
bsd-3-clause
|
WMD-Bath/sulfur-model
|
scripts/plots.py
|
2
|
45105
|
#! /usr/bin/env python
import argparse
import os.path
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
from matplotlib import gridspec
from scipy.special import erf, erfc
import ase.thermochemistry
import ase.db
import shelve
from itertools import cycle
from matplotlib import rc, rcParams
#rc('font',**{'family':'serif', 'weight':'normal'})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
rc('font',**{'family':'sans-serif', 'sans-serif':['Helvetica Neue']})
rc('text', usetex=True)
# rcParams['text.latex.preamble'] = [r'\boldmath']
rcParams['text.latex.preamble'] = [r'\usepackage{helvet} \usepackage{sfmath}']
rc('legend',**{'fontsize':10})
import os # get correct path for datafiles when called from another directory
import sys # PATH manipulation to ensure sulfur module is available
from itertools import izip
from collections import namedtuple
Calc_n_mu = namedtuple('Calc_n_mu','n mu H labels')
script_directory = os.path.dirname(__file__)
# Append a trailing slash to make a coherent directory name. With no prefix this
# would point at the filesystem root, so we need to check first.
if script_directory:
script_directory = script_directory + '/'
module_directory = os.path.abspath(script_directory + '..')
data_directory = os.path.abspath(script_directory + '../data')
sys.path.insert(0,module_directory)
from sulfur import get_potentials, unpack_data, reference_energy, solve_composition, mix_enthalpies
ordered_species = ['S2','S3_ring','S3_bent','S4_buckled','S4_eclipsed','S5_ring','S6_stack_S3','S6_branched','S6_buckled','S6_chain_63','S7_ring','S7_branched','S8']
data_sets = {'LDA':'sulfur_lda.json', 'PBEsol':'sulfur_pbesol.json', 'PBE0':'sulfur_pbe0.json', 'PBE0_scaled':'sulfur_pbe0_96.json', 'B3LYP':'sulfur_b3lyp.json'}
species_colors = {'S2':'#222222','S3_ring':'#a6cee3','S3_bent':'#1f78b4','S4_buckled':'#b2df8a','S4_eclipsed':'#33a02c','S5_ring':'#fb9a99','S6_stack_S3':'#e31a1c','S6_branched':'#fdbf6f','S6_buckled':'#ff7f00','S6_chain_63':'#cab2d6','S7_ring':'#6a3d9a','S7_branched':'#bbbb55','S4_C2h':'#b04040','S8':'#b15928'}
species_markers = {'S2':'8','S3_ring':'>','S3_bent':'<','S4_buckled':'^','S4_eclipsed':'o',
'S5_ring':'d','S6_stack_S3':'D','S6_branched':'H','S6_buckled':'h','S6_chain_63':'*',
'S7_ring':'p','S7_branched':'s','S8':'x'}
# LaTeX formatted names for species. Keys correspond to database keys
species_names = {'S2':r'S$_2$ (D$_{\infty \mathrm{h}}$)','S3_ring':r'S$_3$ (D$_{3\mathrm{h}}$)','S3_bent':r'S$_3$ (C$_{2\mathrm{v}}$)','S4_buckled':r'S$_4$ (D$_{2\mathrm{d}}$)','S4_eclipsed':r'S$_4$ (C$_{2\mathrm{v}}$)','S4_C2h':r'S$_4$ (C$_{2\mathrm{h}}$)','S5_ring':r'S$_5$ (C$_\mathrm{s}$)','S6_stack_S3':r'S$_6$ (D$_{3 \mathrm{h}}$)','S6_branched':r'S$_6$ (C$_1$, branched)','S6_buckled':r'S$_6$ (C$_{2\mathrm{v}}$)','S6_chain_63':r'S$_6$ (C$_1$, chain)','S7_ring':r'S$_7$ (C$_{\mathrm{s}}$)','S7_branched':r'S$_7$ (C$_\mathrm{s}$, branched)','S8':r'S$_8$ (D$_{4\mathrm{d}}$)'}
# Alternative / LaTeX escaped names for DFT functionals. May also be useful for changing capitalisation, LDA vs LSDA etc.
functional_names = {'PBE0_scaled':r'PBE0 (scaled)'}
# Add module to path
import sys
sys.path.append(script_directory+'../')
import scipy.constants as constants
eV2Jmol = constants.physical_constants['electron volt-joule relationship'][0] * constants.N_A
k = constants.physical_constants['Boltzmann constant in eV/K'][0]
### Parameters for PBE0_scaled fits ###
S8_poly = [ -3.810e-13, 1.808e-09, -4.012e-06,
-2.457e-03, 7.620e-01]
S2_poly = [ -8.654e-14, 4.001e-10, -8.566e-07,
-1.848e-03, 1.207e00]
gaussian_height_poly = [ 6.663e01, -2.041e02, 1.414e03]
T_tr_poly = [ 1.828, -8.295, 7.272e01 , 5.077e02]
gaussian_b = 10
gaussian_c = 80
def mu_S8_fit(T,P):
return np.polyval(S8_poly,T) + k*T*np.log(P/1E5)
def mu_S2_fit(T,P):
return np.polyval(S2_poly,T) + k*T*np.log(P/1E5)
def T_tr(P):
return np.polyval(T_tr_poly, np.log10(P))
def mu_fit(T,P):
t_tr = T_tr(P)
mu_S8_contrib = mu_S8_fit(T,P)*erfc((T-t_tr)/gaussian_b) * (1./(2.*8.)) * eV2Jmol
mu_S2_contrib = mu_S2_fit(T,P)*(erf((T-t_tr)/gaussian_b)+1) * (1./(2.*2.)) * eV2Jmol
gaussian_contrib = -(np.polyval(gaussian_height_poly, np.log10(P)))*np.exp(-(T-(t_tr-gaussian_b))**2/(2.*gaussian_c**2))
return mu_S8_contrib + mu_S2_contrib + gaussian_contrib
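# Added note: mu_fit blends the S8-like and S2-like fits around the pressure-
# dependent transition temperature T_tr(P), using erf/erfc switching of width
# ~gaussian_b plus a Gaussian correction near the crossover, and returns a value
# in J mol-1 per S atom. Example call with hypothetical conditions:
#     mu_700K_1bar = mu_fit(700.0, 1e5)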
def plot_T_composition(T, n, labels, title, filename=False):
axis=plt.gca()
fig=plt.gcf()
axis.set_color_cycle(["#222222","#a6cee3","#1f78b4","#b2df8a","#33a02c","#fb9a99","#e31a1c","#fdbf6f","#ff7f00","#cab2d6","#6a3d9a","#bbbb55","#b04040","#b15928"])
plt.plot(T,n, linewidth=5)
axis.set_position([0.1,0.15,0.6,0.75])
plt.legend(labels, loc='center left',bbox_to_anchor=(1,0.5))
plt.xlabel('Temperature / K')
plt.ylabel('Mole fraction $x_i$')
plt.title(title)
    plt.xlim(400, 1500)
    plt.ylim(0, 1)
fig.set_size_inches(8,6)
if filename:
plt.savefig(filename)
else:
plt.show()
plt.close(fig)
def plot_frequencies(functionals=False, figsize=False, filename=False):
"""
Plot calculated vibrational mode frequencies of S8 compared to spectroscopic data
Arguments:
functionals: iterable of strings identifying DFT dataset; each string must be a key in 'data_sets' dict
figsize: 2-tuple of figure dimensions in inches
filename: path to output file. If False (default), print to screen instead.
"""
if not functionals:
functionals = data_sets.keys()
if not figsize:
figsize = (8.4/2.54, 8.4/2.54)
fig = plt.figure(figsize=figsize)
NIST_S8_f = [475,218,471,471,191,191,475,475,152,152,56,56,411,243,437,437,248,248]
index=0
ticklabels=[]
for functional in functionals:
index += 1
db_file = data_directory + '/' + data_sets[functional]
db = ase.db.connect(db_file)
freqs = db.get_dict('S8').data.frequencies
plt.plot([index]*len(freqs),freqs, '_', markersize=20, label=functional)
if functional in functional_names:
ticklabels.append(functional_names[functional])
else:
ticklabels.append(functional)
index +=1
plt.plot([index]*len(NIST_S8_f), NIST_S8_f, 'k_', markersize=20, label='Expt')
ticklabels.append('Expt')
plt.xlim(0.5,len(ticklabels)+0.5)
axis = plt.gca()
axis.xaxis.set_ticks(range(1,len(ticklabels)+1))
axis.xaxis.set_ticklabels(ticklabels, rotation=35, ha='right')
# fontsize=10
# for tick in axis.xaxis.get_major_ticks():
# tick.label.set_fontsize(fontsize)
# for tick in axis.yaxis.get_major_ticks():
# tick.label.set_fontsize(fontsize)
plt.ylabel('Frequency / cm$^{-1}$')
plt.ylim(0,500)
plt.subplots_adjust(left=0.2,bottom=0.25)
if filename:
plt.savefig(filename)
else:
plt.show()
plt.close(fig)
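# Illustrative usage (assumed output path): compare every tabulated functional
# against the experimental S8 frequencies and write the figure to disk.
#     plot_frequencies(functionals=['LDA', 'PBEsol', 'PBE0', 'PBE0_scaled', 'B3LYP'],
#                      filename='S8_frequencies.pdf')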
def plot_composition(T, P, data, functionals=False, filename=False):
"""
Plot composition vs T over a range of pressures and DFT functionals in a neat tiled array
Arguments:
T: iterable of temperatures in K
P: iterable of pressures in Pa
data: dict containing Calc_n_mu namedtuples, with keys corresponding to 'functionals'.
Each namedtuple contains the nested lists n[P][T], mu[P][T] and list labels.
n: atom frac of S of each species, corresponding to labels
mu: free energy of mixture on atom basis in J mol-1
labels: identity labels of each species in mixture
functionals: iterable of strings identifying DFT dataset; each string must be a key in 'data_sets' dict
filename: path to output file. If False (default), print to screen instead.
"""
if functionals == False:
functionals = data.keys()
fig = plt.figure(figsize = (17.2 / 2.54, 17 / 2.54))
gs = gridspec.GridSpec(len(functionals), len(P), bottom=0.25)
tick_length = 4
tick_width = 0.5
for row, functional in enumerate(functionals):
color_cycle = [species_colors[species] for species in data[functional].labels]
for col, p in enumerate(P):
ax = plt.subplot(gs[row,col])
ax.set_color_cycle(color_cycle)
for i, species in enumerate(data[functional].labels):
ax.plot(T, [data[functional].n[col][t_index][i] for t_index in range(len(T))])
ml = MultipleLocator(400)
ax.xaxis.set_major_locator(ml)
ax.axes.set_ylim([0,1])
ax.axes.set_xlim([400,1500])
if row == 0:
ax.set_title("$10^{" + "{0:d}".format(int(np.log10(p))) + "}$ Pa", fontweight='normal')
ax.set_xticklabels('',visible=False)
elif row != len(functionals) -1:
ax.set_xticklabels('',visible=False)
else:
ax.axes.set_xlabel('Temperature / K')
if col == 0:
if functional in functional_names:
functional_label = functional_names[functional]
else:
functional_label = functional
ax.axes.set_ylabel(functional_label)
ax.set_yticks([0,1])
ax.set_yticklabels(['0','1'])
ml = MultipleLocator(0.2)
ax.yaxis.set_minor_locator(ml)
ax.tick_params('both',length=tick_length,width=tick_width, which='both')
else:
ax.set_yticklabels('',visible=False)
ax.tick_params('both',length=tick_length,width=tick_width, which='both')
plt.legend([plt.Line2D((0,1),(0,0), color=species_colors[species]) for species in ordered_species],
[species_names[species] for species in ordered_species], ncol=4, loc='center', bbox_to_anchor=(0.5,0.1), bbox_transform=fig.transFigure, fontsize=11)
if filename:
plt.savefig(filename)
else:
plt.show()
plt.close(fig)
def plot_n_pressures(functional, T=False, P_list=False, P_ref=1E5, compact=False, filename=False):
if not T:
T = np.linspace(10,1500,200)
if not P_list:
P_list = [1E2, 1E5, 1E7]
db_file = data_directory + '/' + data_sets[functional]
labels, thermo, a = unpack_data(db_file, ref_energy=reference_energy(db_file, units='eV'))
if compact:
fig_dimensions = (8 / 2.54, 14 / 2.54)
else:
fig_dimensions = (17.2 / 2.54, 12 / 2.54)
plt.figure(figsize = fig_dimensions)
axis_list = []
subplot_index = 1
xdim, ydim = len(P_list), 1
for P in P_list:
n = []
mu = []
lines = []
for t in T:
n_t, mu_t = solve_composition(a, get_potentials(thermo, T=t, P_ref=P_ref), P=P/P_ref, T=t)
n.append(n_t * a) # Multiply by a; convert between species mole fraction and atom mole fraction
mu.append(mu_t)
axis_list.append(plt.subplot(ydim,xdim,subplot_index))
axes=plt.gca()
fig=plt.gcf()
axes.set_color_cycle(["#222222","#a6cee3","#1f78b4","#b2df8a","#33a02c","#fb9a99","#e31a1c","#fdbf6f","#ff7f00","#cab2d6","#6a3d9a","#bbbb55","#b04040","#b15928"])
for n_species in [list(x) for x in zip(*n)]: # Filthy python to convert list by T to list by species
line, = axes.plot(T,n_species, linewidth=3)
lines.append(line)
plt.xlabel('Temperature / K')
if subplot_index == 1:
plt.ylabel('Mole fraction $x_i$')
axes.set_title("$10^{" + "{0:d}".format(int(np.log10(P)) + "}$ Pa", fontweight='normal'))
subplot_index += 1
# Scale down axes to make way for legend
for ax in axis_list:
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height*0.30, box.width, box.height * 0.70])
ax.set_xticks( np.arange(0,max(T)+1,500) )
legend_ax = fig.add_axes([0.1, 0.01, 0.8, 0.2], frame_on=False, xticks=[], yticks=[]) # Invisible axes holds space for legend
legend_ax.legend(lines,labels, ncol=3, loc='center', fontsize='small')
    # fig.tight_layout()
if filename:
plt.savefig(filename)
else:
plt.show()
plt.close(fig)
def compute_data(functionals=['PBE0_scaled'], T=[298.15], P=[1E5], ref_energy='expt', enthalpy=False):
"""
Solve S_x equilibrium over specified sets of DFT data, temperatures and pressures
Arguments
functionals: iterable of strings identifying DFT dataset; must be a key in 'data_sets' dict
T: iterable of temperatures in K
P: iterable of pressures in Pa
ref_energy: Select reference energy. If 'expt' (default), use experimental enthalpy
of alpha-S as reference. If 'S8', use 1/8 * ground state energy of S8
in chosen data set as reference energy.
If 'S2', use 1/2 * ground-state energy of S2 in chosen data set as reference energy.
If a floating point number, the value of ref_energy is used with units of eV/atom.
enthalpy: Boolean flag. If True, also compute enthalpy.
(This costs extra time and is not usually required.)
Returns
data: dict containing Calc_n_mu namedtuples, with keys corresponding to 'functionals'.
Each namedtuple contains the nested lists n[P][T], mu[P][T], H[P][T] and list labels.
n: atom frac of S of each species, corresponding to labels
mu: free energy of mixture on atom basis in J mol-1
H: enthalpy of mixture on atom basis in J mol-1. False if not computed.
labels: identity labels of each species in mixture
"""
P_ref = 1E5
eqm_data = {}
for functional in functionals:
db_file = data_directory + '/' + data_sets[functional]
if type(ref_energy) != str and np.isscalar(ref_energy): # (Strings are scalar!)
labels, thermo, a = unpack_data(db_file, ref_energy=ref_energy)
elif ref_energy == 'expt':
labels, thermo, a = unpack_data(db_file, ref_energy=reference_energy(db_file, units='eV', ref='expt'))
elif ref_energy == 'S8':
labels, thermo, a = unpack_data(db_file, ref_energy=reference_energy(db_file, units='eV', ref='S8'))
elif ref_energy == 'S2':
labels, thermo, a = unpack_data(db_file, ref_energy=reference_energy(db_file, units='eV', ref='S2'))
else:
raise Exception("ref_energy key {0} not recognised")
n = []
mu = []
if enthalpy:
n, mu, H = [], [], []
for p in P:
n_p, mu_p, H_p = [], [], []
for t in T:
n_p_T, mu_p_T = solve_composition(
a, get_potentials(thermo, T=t, P_ref=P_ref), P=p/P_ref, T=t)
H_p_T = mix_enthalpies(n_p_T, thermo, t)
n_p.append(n_p_T)
mu_p.append(mu_p_T)
H_p.append(H_p_T)
n.append(n_p)
mu.append(mu_p)
H.append(H_p)
eqm_data.update({functional:Calc_n_mu(n, mu, H, labels)})
else:
for p in P:
n_p_mu_p_double = [
([n_i*a_i for n_i,a_i in izip(n_p_T,a)], mu_p_T) for
(n_p_T, mu_p_T) in (
solve_composition(a, get_potentials(thermo, T=t, P_ref=P_ref), P=p/P_ref, T=t)
for t in T)
]
n_p, mu_p = [x[0] for x in n_p_mu_p_double], [x[1] for x in n_p_mu_p_double]
n.append(n_p)
mu.append(mu_p)
eqm_data.update({functional:Calc_n_mu(n, mu, False, labels)})
return eqm_data
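# Illustrative usage (hypothetical variable names and ranges): build the
# equilibrium data once and reuse it for the tiled plots defined in this module.
#     T_grid = np.linspace(400, 1500, 111)
#     P_grid = [1e2, 1e5, 1e7]
#     eqm = compute_data(functionals=['PBE0_scaled', 'B3LYP'], T=T_grid, P=P_grid)
#     plot_composition(T_grid, P_grid, eqm, functionals=['PBE0_scaled', 'B3LYP'])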
def plot_mu_functionals(data, T, P, functionals=False, T_range=False, mu_range=False, filename=False, compact=False):
"""
Plot free energy against T for a range of datasets.
Arguments:
data: dict containing Calc_n_mu namedtuples, with keys corresponding to 'functionals'.
Each namedtuple contains the nested lists n[P][T], mu[P][T] and list labels.
n: atom frac of S of each species, corresponding to labels
mu: free energy of mixture on atom basis in J mol-1
labels: identity labels of each species in mixture
T: Iterable of temperatures in K, corresponding to T ranges in data
P: Iterable of P values in Pa corresponding to data. Used for labelling: all pressures will be plotted
functionals: iterable containing keys of data to use. If False, all functionals in 'data_sets' will be plotted.
T_range: 2-tuple in K of temperature range to display. If False, use range of T.
mu_range: 2-tuple in kJ mol-1 of mu range to display. If False, use matplotlib default
filename: Filename for plot output. If False (default), print to screen instead.
compact: Boolean, setting width to 8cm for publication if True
"""
########## Literature data for S2, S8 ##########
s2_data = np.genfromtxt(data_directory + '/S2.dat', skip_header=2)
s8_data = np.genfromtxt(data_directory + '/S8.dat', skip_header=2)
# Fourth column contains -(G-H(Tr))/T in J/molK
T_s2 = s2_data[:,0]
DG_s2 = s2_data[:,3] * T_s2 * -1E-3
mu_s2 = (DG_s2 + 128.600)/2.
T_s8 = s8_data[:,0]
DG_s8 = s8_data[:,3] * T_s8 * -1E-3
mu_s8 = (DG_s8 + 101.416)/8.
R = 8.314 * 1E-3 # Gas constant in kJ mol-1 K-1
######## Plotting ########
if not T_range:
T_range = (min(T), max(T))
if functionals == False:
functionals = data_sets.keys()
if compact:
fig_dimensions = (8 / 2.54, 8 / 2.54)
else:
fig_dimensions = (17.2 / 2.54, 8 / 2.54)
bottom = 0.4
fig = plt.figure(figsize = fig_dimensions)
gs = gridspec.GridSpec(1,len(P), bottom=bottom)
for i_p, p in enumerate(P):
if i_p == 0:
ax = plt.subplot(gs[i_p])
left_ax = ax
ax.axes.set_ylabel('$\mu_S$ / kJ mol$^{-1}$')
else:
ax = plt.subplot(gs[i_p], sharey=left_ax)
for functional in functionals:
if functional in functional_names:
functional_name = functional_names[functional]
else:
functional_name = functional
mu_kJmol = np.array(data[functional].mu[i_p]) * 1E-3
ax.plot(T,mu_kJmol, label=functional_name)
# Plot literature data with pressure correction
ax.plot(T_s2, mu_s2 + R*T_s2*np.log(p/1E5)/2, label=r'S$_2$ (lit.)', linestyle=':')
ax.plot(T_s8, mu_s8 + R*T_s8*np.log(p/1E5)/8, label=r'S$_8$ (lit.)', linestyle=':')
if i_p > 0:
plt.setp(ax.get_yticklabels(), visible=False) # I don't understand why ax.set_yticklabels doesn't work here, but it wipes out the first column too.
ml = MultipleLocator(400)
ax.xaxis.set_major_locator(ml)
ax.axes.set_xlim(T_range)
ax.axes.set_title('P = {0} bar'.format(p * 1E-5))
ax.axes.set_xlabel('Temperature / K')
if mu_range:
ax.axes.set_ylim(mu_range)
plt.legend(ncol=4, loc='center', bbox_to_anchor=(0.5,bottom/3.), bbox_transform=fig.transFigure, fontsize=11)
if filename:
plt.savefig(filename)
else:
plt.show()
plt.close(fig)
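# Illustrative usage (same hypothetical T_grid/P_grid/eqm as above): overlay the
# DFT mixtures on the literature S2/S8 curves at each pressure.
#     plot_mu_functionals(eqm, T_grid, P_grid, functionals=['PBE0_scaled', 'B3LYP'],
#                         T_range=(400, 1500))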
def plot_mu_contributions( T, P, data, functionals, T_range=(400,1500), filename=False, figsize=(17.2 / 2.54, 17 / 2.54), bottom=0.4, T_units='K', T_increment=400, mu_range=False):
"""
Plot free energy of mixture, showing contributions of components.
Arguments:
data: dict containing Calc_n_mu namedtuples, with keys corresponding to 'functionals'.
Each namedtuple contains the nested lists n[P][T], mu[P][T] and list labels.
n: atom frac of S of each species, corresponding to labels
mu: free energy of mixture on atom basis in J mol-1
labels: identity labels of each species in mixture
T: Iterable of temperatures in K, corresponding to T ranges in data
P: Iterable of P values in Pa corresponding to data. Used for labelling: all pressures will be plotted
functionals: iterable containing keys of data to use. If False, all functionals
in 'data_sets' will be plotted.
T_range: 2-tuple containing temperature range of plot [default value (200,1500)]
filename: Filename for plot output. If False (default), print to screen instead.
figsize: 2-tuple containing figure dimensions in inches
bottom: fraction of figure vertically reserved for legend.
T_units: Temperature unit. If set to 'C', T_range and displayed axis are in degrees C. Input data must still be in K.
T_increment: Float; Spacing between temperature markers on x-axis
mu_range: 2-tuple. If set, force displayed y-axis range in kJ/mol
"""
fig = plt.figure(figsize=figsize)
# Allow extra space for functional labels on y axes
if len(functionals) > 1:
left = 0.2
else:
left=0.15
gs = gridspec.GridSpec(len(functionals), len(P), left=left, bottom=bottom)
tick_length = 4
tick_width = 0.5
if T_units == 'C':
T_offset=-273.15
T_label = r'Temperature / $^\circ$C'
else:
T_offset=0
T_label = r'Temperature / K'
for row, functional in enumerate(functionals):
db_file = data_directory + '/' + data_sets[functional]
labels, thermo, a = unpack_data(db_file, ref_energy=reference_energy(db_file, units='eV'))
color_cycle = [species_colors[species] for species in data[functional].labels]
for col, p in enumerate(P):
if col == 0:
ax = plt.subplot(gs[row,col])
left_ax = ax
else:
ax = plt.subplot(gs[row,col], sharey=left_ax)
ax.set_color_cycle(color_cycle)
ax.plot([t + T_offset for t in T], [get_potentials(thermo,T=t,P_ref=p) * 1E-3 / a for t in T])
ax.plot([t + T_offset for t in T], [mu * 1E-3 for mu in data[functional].mu[col]], 'm:', linewidth=4)
ax.tick_params('both',length=tick_length,width=tick_width, which='both')
ml = MultipleLocator(T_increment)
ax.xaxis.set_major_locator(ml)
ax.axes.set_xlim(T_range)
if mu_range:
ax.axes.set_ylim(mu_range)
if row == 0:
ax.set_title("$10^{" + "{0:d}".format(int(np.log10(p))) + "}$ Pa", fontweight='normal')
if row != len(functionals) -1:
ax.set_xticklabels('',visible=False)
if row == len(functionals) -1:
ax.axes.set_xlabel(T_label)
# Only specify name of functional if more than one is used
if col == 0:
if len(functionals) == 1:
functional_label = r'$\mu$ / kJ mol$^{-1}$'
elif functional in functional_names:
functional_label = functional_names[functional] + '\n' + r'$\mu$ / kJ mol$^{-1}$'
else:
functional_label = functional + '\n' + r'$\mu$ / kJ mol$^{-1}$'
ax.axes.set_ylabel(functional_label)
else:
plt.setp(ax.get_yticklabels(), visible=False) # Hide tick labels on the shared y-axis; ax.set_yticklabels('') would also wipe the first column because the axis is shared.
plt.legend([plt.Line2D((0,1),(0,0), color=species_colors[species]) for species in ordered_species] + [plt.Line2D((0,1),(0,0), color='m', linestyle=':', linewidth=4)],
[species_names[species] for species in ordered_species] + ['Mixture'], ncol=4, loc='center', bbox_to_anchor=(0.5,bottom/3.), bbox_transform=fig.transFigure, fontsize=11)
if filename:
plt.savefig(filename)
else:
plt.show()
plt.close(fig)
def tabulate_data(data,T,P,path='',formatting=('kJmol-1')):
"""
Write tables of composition and free energy. Write enthalpy in table if data available.
Arguments:
data: dict containing Calc_n_mu namedtuples, with keys corresponding to 'functionals'.
Each namedtuple contains the nested lists n[P][T], mu[P][T] and list labels.
n: atom frac of S of each species, corresponding to labels
mu: free energy of mixture on atom basis in J mol-1
H: enthalpy of mixture on atom basis in J mol-1
labels: identity labels of each species in mixture
T: Iterable containing temperature values in K corresponding to data
P: Iterable containing pressure values in Pa corresponding to data
path: directory for csv files to be written in
formatting: iterable of strings. 'logP' for log pressures. Set units with
'Jmol-1'|'Jmol'|'J/mol'|'kJmol-1'|'kJmol'|'kJ/mol'. Reduce decimal precision with 'short'.
"""
import string
if path:
if path[-1] != '/':
path = path + '/'
try:
os.mkdir(path)
except OSError:
pass
if formatting and any([x in formatting for x in ('Jmol-1','Jmol','J/mol')]):
energy_units = 'J mol-1'
energy_units_factor = 1.0
elif formatting and any([x in formatting for x in ('kJmol-1','kJmol','kJ/mol')]):
energy_units = 'kJ mol-1'
energy_units_factor = 1.0e-3
else:
raise Exception("no valid units in format string {0}".format(formatting))
if 'short' in formatting:
energy_string='{0:1.2f}'
else:
energy_string='{0:1.4f}'
for functional in data.keys():
with open(path + 'mu_{0}.csv'.format(functional.lower()), 'w') as f:
if 'logP' in formatting:
linelist = ['# T/K,' + string.join(['mu (10^{0:.2f} Pa) / {1}'.format(np.log10(p), energy_units) for p in P],',') + '\n']
else:
linelist = ['# T/K,' + string.join(['mu ({0} Pa) / {1}'.format(p, energy_units) for p in P],',') + '\n']
for t_index, t in enumerate(T):
linelist.append( '{0},'.format(t) + string.join([energy_string.format(mu_p[t_index]*energy_units_factor) for mu_p in data[functional].mu],',') + '\n')
f.writelines(linelist)
for functional in data.keys():
with open(path + 'n_{0}.csv'.format(functional.lower()), 'w') as f:
for p_index, p in enumerate(P):
if 'logP' in formatting:
linelist = ['# P = 10^{0:.2f} Pa\n'.format(np.log10(p))]
else:
linelist = ['# P = {0} Pa\n'.format(p)]
linelist.append('# T/K, ' + string.join(['x({0})'.format(x) for x in data[functional].labels],',') + '\n')
for t_index, t in enumerate(T):
linelist.append('{0},'.format(t) + string.join(['{0:1.4f}'.format(n) for n in data[functional].n[p_index][t_index]],',') + '\n')
f.writelines(linelist)
for functional in data.keys():
if type(data[functional].H) != bool:
with open(path + 'H_{0}.csv'.format(functional.lower()), 'w') as f:
if 'logP' in formatting:
linelist = ['# T/K,' + string.join(['H (10^{0:.2f} Pa) / {1}'.format(np.log10(p), energy_units) for p in P],',') + '\n']
else:
linelist = ['# T/K,' + string.join(['H ({0} Pa) / {1}'.format(p, energy_units) for p in P],',') + '\n']
for t_index, t in enumerate(T):
linelist.append( '{0},'.format(t) + string.join([energy_string.format(H_p[t_index]*energy_units_factor) for H_p in data[functional].H],',') + '\n')
f.writelines(linelist)
def plot_mix_contribution(T, P, data, functional='PBE0_scaled', filename=False, figsize=(8.4/2.52, 8.4/2.54)):
"""
Plot contribution of mixing entropy and minor phases to free energy.
Arguments:
T: Iterable containing temperature values in K corresponding to data
P: Iterable containing pressure values in Pa corresponding to data
data: dict containing Calc_n_mu namedtuples, with keys corresponding to 'functionals'.
Each namedtuple contains the nested lists n[P][T], mu[P][T] and list labels.
n: atom frac of S of each species, corresponding to labels
mu: free energy of mixture on atom basis in J mol-1
labels: identity labels of each species in mixture
functional: Dataset to plot; must be a key in data
filename: Filename for plot output. If False (default), print to screen instead.
figsize: Figure dimensions in inches
"""
T = np.array(T)
db_file = data_directory + '/' + data_sets[functional]
labels, thermo, a = unpack_data(db_file, ref_energy=reference_energy(db_file, units='eV'))
S2_thermo = thermo[labels.index('S2')]
S8_thermo = thermo[labels.index('S8')]
def get_gibbs_wrapper(thermo, T, P):
return(ase.thermochemistry.IdealGasThermo.get_gibbs_energy(thermo,T,P,verbose=False))
v_get_gibbs_energy=np.vectorize(get_gibbs_wrapper)
fig = plt.figure(figsize=figsize)
linestyles=['-','--',':','-.']
linecycler = cycle(linestyles)
for p_index, p in enumerate(P):
mu_S2 = v_get_gibbs_energy(S2_thermo,T, p) * eV2Jmol / 2.
mu_S8 = v_get_gibbs_energy(S8_thermo,T, p) * eV2Jmol / 8.
plt.plot(T,( data[functional].mu[p_index] - np.minimum(mu_S2, mu_S8))*1e-3, label=r'$10^{{{0:1.0f}}}$ Pa'.format(np.log10(p)), linestyle=linecycler.next(), color='k')
plt.xlabel('Temperature / K')
plt.ylabel(r'$\Delta \mu_{\mathrm{mixture}}$ / kJ mol$^{-1}$')
plt.legend(loc='upper center', bbox_to_anchor=(0.5,-0.2), ncol=2)
plt.subplots_adjust(left=0.26,bottom=0.3)
ax = plt.gca()
ml = MultipleLocator(400)
ax.xaxis.set_major_locator(ml)
ax.axes.set_xlim([400,1500])
if filename:
plt.savefig(filename)
else:
plt.show()
plt.close(fig)
def plot_surface(functional='PBE0_scaled', T_range=(400,1500), P_range=(1,7), resolution=1000, tolerance = 1e4, parameterised=True, filename=False, plot_param_err=False, nodash=False):
"""Generate a surface plot showing recommended S models. Can be slow!
Arguments:
functional: id of dataset used. PBE0_scaled is strongly recommended as it has good agreement with experimental data.
T_range: Tuple containing T range in K
P_range: Tuple containing (log10(Pmin), log10(Pmax))
resolution: Number of points on x and y axis. Note that a full free energy minimisation is carried out at each point, so print-ready resolutions will take some time to compute.
tolerance: Error threshold for transition region in Jmol-1
parameterised: Boolean. If True, use parameterised fit (polynomials, erf and gaussian). If False, solve equilibrium at all points (slow!)
filename: String containing output file. If False, print to screen.
plot_param_err: Boolean; request an additional plot showing error of parameterisation
nodash: Boolean; Skip drawing coexistence line
"""
figsize = (8.3/2.54, 8.3/2.54)
T = np.linspace(min(T_range), max(T_range), resolution)
P = 10**np.linspace(min(P_range),max(P_range),resolution)[:, np.newaxis]
if parameterised:
mu_mixture = mu_fit(T,P)
mu_S2 = mu_S2_fit(T,P) * eV2Jmol / 2.
mu_S8 = mu_S8_fit(T,P) * eV2Jmol / 8.
else:
cache = shelve.open('cache')
if cache.has_key('surface'):
T, P, data = cache['surface']
else:
data = compute_data(T=T, P=P, functionals=[functional])
cache['surface'] = (T, P, data)
cache.close()
mu_mixture = np.array(data[functional].mu)
db_file = data_directory+'/'+data_sets[functional]
labels, thermo, a = unpack_data(db_file,ref_energy=reference_energy(db_file, units='eV'))
S2_thermo = thermo[labels.index('S2')]
S8_thermo = thermo[labels.index('S8')]
def get_gibbs_wrapper(thermo, T, P):
return(ase.thermochemistry.IdealGasThermo.get_gibbs_energy(thermo,T,P,verbose=False))
v_get_gibbs_energy=np.vectorize(get_gibbs_wrapper)
mu_S2 = v_get_gibbs_energy(S2_thermo,T, P) * eV2Jmol / 2.
mu_S8 = v_get_gibbs_energy(S8_thermo,T, P) * eV2Jmol / 8.
fig = plt.figure(figsize = figsize)
CS = plt.contour(T,np.log10(P).flatten(),np.minimum(abs(mu_S2 - mu_mixture),abs(mu_S8 - mu_mixture)), [1000])
plt.contourf(T,np.log10(P).flatten(),np.minimum(abs(mu_S2 - mu_mixture),abs(mu_S8 - mu_mixture)), [1000,1e10], colors=[(0.7,0.7,1.00)])
# plt.clabel(CS, inline=1, fontsize=10) # Contour line labels
if not nodash:
plt.plot(T_tr(P),np.log10(P),'k--', linewidth=3)
plt.xlim(min(T_range),max(T_range))
plt.text(500, 4, r'S$_{8}$')
plt.text(1000, 4, r'S$_{2}$')
plt.xlabel('Temperature / K')
plt.ylabel('$\log_{10}( P / \mathrm{Pa})$')
fig.subplots_adjust(bottom=0.15, left=0.15)
ax = plt.gca()
ax.xaxis.set_major_locator(MultipleLocator(base=200))
if filename:
plt.savefig(filename)
else:
plt.show()
if plot_param_err:
mu_param = mu_fit(T,P)
fig2 = plt.figure(figsize=figsize)
CS2 =plt.contour(T,np.log10(P).flatten(), (mu_param - mu_mixture)*1e-3, cmap='Greys')
plt.clabel(CS2, inline=1, fontsize=10, colors='k', fmt='%2.1f')
plt.xlabel('Temperature / K')
plt.ylabel('$\log_{10}( P / \mathrm{Pa})$')
fig2.subplots_adjust(left=0.15, bottom=0.15)
ax = plt.gca()
ax.xaxis.set_major_locator(MultipleLocator(base=200))
if filename:
err_filename = os.path.dirname(filename) + '/param_error.' + os.path.basename(filename)
plt.savefig(err_filename)
else:
plt.show()
plt.close(fig)
def check_fit():
"""Sanity check for polynomial fitting"""
T = np.linspace(100,1000,10)
P = np.array([1E3])
data = compute_data(T=T, P=P, functionals=['PBE0_scaled'])
mu_mixture = np.array(data['PBE0_scaled'].mu)
db_file = data_directory+'/'+data_sets['PBE0_scaled']
labels, thermo, a = unpack_data(db_file,ref_energy=reference_energy(db_file, units='eV'))
S2_thermo = thermo[labels.index('S2')]
S8_thermo = thermo[labels.index('S8')]
v_get_gibbs_energy=np.vectorize(ase.thermochemistry.IdealGasThermo.get_gibbs_energy)
mu_S2 = v_get_gibbs_energy(S2_thermo,T, P, verbose=False) * eV2Jmol / 2.
mu_S8 = v_get_gibbs_energy(S8_thermo,T, P, verbose=False) * eV2Jmol / 8.
plt.plot(T, mu_mixture.transpose(), 'bx', ms=20, label="Mixture (solver)")
plt.plot(T, mu_fit(T,P),'r+', ms=20, label="Mixture (fit)")
plt.plot(T, mu_S2, 'go', label=r"S$_2$ (model)")
plt.plot(T, mu_S2_fit(T,P) * eV2Jmol/2.,'k^', label=r"S$_2$ (fit)")
plt.plot(T, mu_S8_fit(T,P) * eV2Jmol/8., 'k*', label=r"S$_8$ (fit)")
plt.legend()
plt.show()
def plot_energies(functionals=data_sets.keys(), filename=False, figsize=False):
if not figsize:
figsize = (8.3/2.54, 12/2.54)
fig = plt.figure(figsize=figsize)
gs = gridspec.GridSpec(1,1,left=0.2, bottom=0.35)
ax = plt.subplot(gs[0,0])
colors = ['r','g','b','k']
colorcycler = cycle(colors)
for functional in functionals:
color=colorcycler.next()
db_file = data_directory+'/'+data_sets[functional]
c = ase.db.connect(db_file)
E_ref = c.get_atoms('S8').get_total_energy()/8.
for species in ordered_species:
atoms = c.get_atoms(species)
E = atoms.get_total_energy()
N = atoms.get_number_of_atoms()
ax.plot(N, ((E/N)-E_ref)*eV2Jmol*1e-3, marker=species_markers[species], fillstyle='none', color=color)
# Value from reference data
ref_col=(0.4,0.4,0.4)
S2_ref_DH0 = 128.300
S8_ref_DH0 = 104.388
ref_value = S2_ref_DH0/2. - S8_ref_DH0/8.
ax.plot(2,ref_value,marker='8',fillstyle='full',color=ref_col, linestyle='none')
plt.xlim(1.5,8.5)
plt.xlabel(r'$N$ / S atoms')
plt.ylabel(r'$\frac{E_0}{N} - \frac{E_{0,\mathrm{S}_8}}{8}$ / kJ mol$^{-1}$')
colorcycler=cycle(colors) # reset colorcycler
plt.legend([plt.Line2D((0,1),(0,0), color='k', linestyle='none',
marker=species_markers[s], fillstyle='none') for s in ordered_species] +
[plt.Line2D((0,1),(0,0), color=colorcycler.next(), marker=False) for f in functionals] +
[plt.Line2D((0,1),(0,0),color=ref_col, marker='8', fillstyle='full', linestyle='none')],
[species_names[s] for s in ordered_species] + functionals + [r'S$_2$ [ref.]'],
ncol=3, loc='center', bbox_to_anchor=(0.5,0.12), numpoints=1, fontsize=8, bbox_transform=fig.transFigure)
if filename:
plt.savefig(filename)
else:
plt.show()
plt.close(fig)
def main(plots='all', tables='all', T_range=(400,1500)):
"""
Solve sulfur equilibrium from ab initio data; generate plots and tables.
A cache file is used to avoid redundant calculations.
arguments:
plots: list of strings indicating desired plots.
['all'] is equivalent to ['energies', 'composition','mu_functionals',
'mu_all_functionals','mu_contributions',
'mu_annealing','mix_contribution',
'surface','freqs']
tables: list of strings indicating results sets to include as tables.
['all'] is equivalent to ['LDA','PBEsol','PBE0','PBE0_scaled','B3LYP']
In addition the strings 'long', 'short', 'linear', 'logP' can be used
to set the formatting (default is 'short' and 'logP' behaviour)
T_range: 2-Tuple of upper and lower temperature limits
"""
### Open cache file for plot data
cache = shelve.open('cache')
if 'T_range' in cache and cache['T_range'] == T_range:
cache_T_okay = True
else:
cache_T_okay = False
cache['T_range'] = T_range
### Comparison of DFT energies
if 'all' in plots or 'energies' in plots:
plot_energies(functionals=['LDA','PBEsol','B3LYP','PBE0'], filename='plots/energies.pdf', figsize=False)
### Plot composition breakdown with PBE0_scaled at 3 pressures ###
if 'composition' in plots:
if cache_T_okay and cache.has_key('PBE0_composition'):
(T, P, data) = cache['PBE0_composition']
else:
T = np.linspace(T_range[0],T_range[1],100)
P = [10**x for x in (1,5,7)]
data = compute_data(T=T, P=P, functionals=data_sets.keys())
cache['PBE0_composition'] = (T, P, data)
cache.sync()
plot_composition(T,P, data, filename='plots/composition_all.pdf')
plot_composition(T, P, data, functionals=['LDA', 'PBEsol', 'PBE0_scaled'], filename='plots/composition_selection.pdf')
### Plots over 3 pressures: mu depending on T, calculation method; mu with
### component contributions; mu with component contributions over smaller T
### range
if any(flag in plots for flag in ('all','mu_functionals','mu_all_functionals',
'mu_contributions','mu_annealing')):
if cache_T_okay and cache.has_key('all_functionals_three_pressures'):
(T, P, data) = cache['all_functionals_three_pressures']
else:
T = np.linspace(T_range[0],T_range[1],100)
P = [10**x for x in (1,5,7)]
data = compute_data(T=T, P=P, functionals = data_sets.keys())
cache['all_functionals_three_pressures'] = (T, P, data)
cache.sync()
if 'all' in plots or 'mu_all_functionals' in plots:
plot_mu_functionals(data, T, P, mu_range=(-200,100), filename='plots/mu_all_functionals.pdf', compact=False, functionals=('LDA','PBEsol','B3LYP','PBE0','PBE0_scaled'))
if 'all' in plots or 'mu_functionals' in plots:
plot_mu_functionals(data, T, P, mu_range=(-200,100), filename='plots/mu_functionals.pdf', compact=False, functionals=('LDA','PBEsol','PBE0_scaled'))
if 'all' in plots or 'mu_contributions' in plots:
plot_mu_contributions(T, P, data, functionals=['PBE0_scaled'], filename='plots/mu_contributions.pdf', figsize=(17.2/2.54, 10/2.54), T_range=[400,1500], T_increment=400)
if 'all' in plots or 'mu_annealing' in plots:
plot_mu_contributions(T,P,data,functionals=['PBE0_scaled'],filename='plots/mu_for_annealing.pdf', figsize=(17.2/2.54, 10/2.43), T_range=(100,600), T_units='C', T_increment=100, mu_range=(-100,50))
### Plot contribution of mixing and secondary phases over a range of pressures
if 'all' in plots or 'mix_contribution' in plots:
if cache_T_okay and cache.has_key('PBE0_four_pressures'):
(T, P, data) = cache['PBE0_four_pressures']
else:
T = np.linspace(400,1500,100)
P = [10**x for x in (1.,3.,5.,7.)]
data = compute_data(T=T, P=P, functionals=['PBE0_scaled'])
cache['PBE0_four_pressures'] = (T, P, data)
cache.sync()
plot_mix_contribution(T, P, data, functional='PBE0_scaled', filename='plots/mu_mix_contribution.pdf', figsize=(8.4/2.52, 8.4/2.54))
### Tabulate data over log pressure range ###
if 'linear' in tables:
formatting=[]
else:
formatting=['logP']
if any(flag in tables for flag in ('Jmol','Jmol-1','J/mol')):
formatting.append('Jmol-1')
else:
formatting.append('kJmol-1')
# Compact tables
if 'all' in tables or 'short' in tables:
T = np.arange(400,1500,50)
P = np.power(10,np.linspace(1,7,10))
data = compute_data(T=T, P=P, functionals = data_sets.keys(), enthalpy=True)
tabulate_data(data,T,P, path=data_directory+'/alpha_ref', formatting=formatting+['short'])
data = compute_data(T=T, P=P, functionals = data_sets.keys(), ref_energy='S8', enthalpy=True)
tabulate_data(data,T,P, path=data_directory+'/S8_ref', formatting=formatting+['short'])
# Larger tables
if 'all' in tables or 'long' in tables:
T = np.arange(200,1500,10)
P = np.power(10,np.linspace(-2,7,25))
data = compute_data(T=T, P=P, functionals = data_sets.keys(), enthalpy=True)
tabulate_data(data,T,P, path=data_directory+'/precise/alpha_ref', formatting=formatting)
data = compute_data(T=T, P=P, functionals = data_sets.keys(), ref_energy='S8', enthalpy=True)
tabulate_data(data,T,P, path=data_directory+'/precise/S8_ref', formatting=formatting)
data = compute_data(T=T, P=P, functionals = data_sets.keys(), ref_energy='S2', enthalpy=True)
tabulate_data(data,T,P, path=data_directory+'/precise/S2_ref', formatting=formatting)
### Contour plots (high resolution -> Lots eqm solutions -> v. slow data calculation)
cache.close()
if 'all' in plots or 'surface' in plots:
plot_surface(resolution=200, parameterised=False, filename='plots/surface.pdf', plot_param_err=True)
# Vibrational frequencies
if 'all' in plots or 'freqs' in plots:
plot_frequencies(functionals=['LDA','PBEsol','B3LYP','PBE0','PBE0_scaled'], figsize=False, filename='plots/empirical_freqs.pdf')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--plots", type=str, default="all",
help="Plots to generate (as space-separated string). Options are: all"
" energies composition mu_functionals mu_all_functionals"
" mu_contributions mu_annealing mix_contribution surface freqs ")
parser.add_argument("-t", "--tables", type=str, default="all",
help="Space-separated string: list of results sets to include as tables."
" 'all' is equivalent to 'LDA PBEsol PBE0 PBE0_scaled B3LYP'"
" In addition the strings 'long', 'short', 'linear', 'logP' can be used"
" to set the formatting (default is 'short' and 'logP' behaviour)")
parser.add_argument("-T", "--temp_range", type=float, nargs=2, default=(400.,1500.),
help="Lower and upper temperature limit in K")
args = parser.parse_args()
plots = args.plots.split()
tables = args.tables.split()
main(plots=plots, tables=tables, T_range=args.temp_range)
|
gpl-3.0
|
boomsbloom/dtm-fmri
|
DTM/for_gensim/lib/python2.7/site-packages/gensim/test/test_sklearn_integration.py
|
3
|
6891
|
import six
import unittest
import numpy
import os
import codecs
import pickle
from scipy import sparse
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.datasets import load_files
from sklearn import linear_model
from gensim.sklearn_integration.sklearn_wrapper_gensim_ldamodel import SklearnWrapperLdaModel
from gensim.sklearn_integration.sklearn_wrapper_gensim_lsimodel import SklearnWrapperLsiModel
from gensim.corpora import Dictionary
from gensim import matutils
module_path = os.path.dirname(__file__) # needed because sample data files are located in the same folder
datapath = lambda fname: os.path.join(module_path, 'test_data', fname)
texts = [['complier', 'system', 'computer'],
['eulerian', 'node', 'cycle', 'graph', 'tree', 'path'],
['graph', 'flow', 'network', 'graph'],
['loading', 'computer', 'system'],
['user', 'server', 'system'],
['tree', 'hamiltonian'],
['graph', 'trees'],
['computer', 'kernel', 'malfunction', 'computer'],
['server', 'system', 'computer']]
dictionary = Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
class TestSklearnLDAWrapper(unittest.TestCase):
def setUp(self):
self.model = SklearnWrapperLdaModel(id2word=dictionary, num_topics=2, passes=100, minimum_probability=0, random_state=numpy.random.seed(0))
self.model.fit(corpus)
def testPrintTopic(self):
topic = self.model.print_topics(2)
for k, v in topic:
self.assertTrue(isinstance(v, six.string_types))
self.assertTrue(isinstance(k, int))
def testTransform(self):
texts_new = ['graph','eulerian']
bow = self.model.id2word.doc2bow(texts_new)
X = self.model.transform(bow)
self.assertTrue(X.shape[0], 1)
self.assertTrue(X.shape[1], self.model.num_topics)
texts_new = [['graph','eulerian'],['server', 'flow'], ['path', 'system']]
bow = []
for i in texts_new:
bow.append(self.model.id2word.doc2bow(i))
X = self.model.transform(bow)
self.assertTrue(X.shape[0], 3)
self.assertTrue(X.shape[1], self.model.num_topics)
def testGetTopicDist(self):
texts_new = ['graph','eulerian']
bow = self.model.id2word.doc2bow(texts_new)
doc_topics, word_topics, phi_values = self.model.get_topic_dist(bow,per_word_topics=True)
for k,v in word_topics:
self.assertTrue(isinstance(v, list))
self.assertTrue(isinstance(k, int))
for k,v in doc_topics:
self.assertTrue(isinstance(v, float))
self.assertTrue(isinstance(k, int))
for k,v in phi_values:
self.assertTrue(isinstance(v, list))
self.assertTrue(isinstance(k, int))
def testPartialFit(self):
for i in range(10):
self.model.partial_fit(X=corpus) # fit against the model again
doc=list(corpus)[0] # transform only the first document
transformed = self.model[doc]
transformed_approx = matutils.sparse2full(transformed, 2) # better approximation
expected=[0.13, 0.87]
passed = numpy.allclose(sorted(transformed_approx), sorted(expected), atol=1e-1)
self.assertTrue(passed)
def testCSRMatrixConversion(self):
Arr = numpy.array([[1, 2, 0], [0, 0, 3], [1, 0, 0]])
sArr = sparse.csr_matrix(Arr)
newmodel = SklearnWrapperLdaModel(num_topics=2, passes=100)
newmodel.fit(sArr)
topic = newmodel.print_topics()
for k, v in topic:
self.assertTrue(isinstance(v, six.string_types))
self.assertTrue(isinstance(k, int))
def testPipeline(self):
model = SklearnWrapperLdaModel(num_topics=2, passes=10, minimum_probability=0, random_state=numpy.random.seed(0))
with open(datapath('mini_newsgroup'),'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
data = cache
id2word=Dictionary(map(lambda x : x.split(), data.data))
corpus = [id2word.doc2bow(i.split()) for i in data.data]
rand = numpy.random.mtrand.RandomState(1) # set seed for getting same result
clf=linear_model.LogisticRegression(penalty='l2', C=0.1)
text_lda = Pipeline((('features', model,), ('classifier', clf)))
text_lda.fit(corpus, data.target)
score = text_lda.score(corpus, data.target)
self.assertGreater(score, 0.40)
class TestSklearnLSIWrapper(unittest.TestCase):
def setUp(self):
self.model = SklearnWrapperLsiModel(id2word=dictionary, num_topics=2)
self.model.fit(corpus)
def testModelSanity(self):
topic = self.model.print_topics(2)
for k, v in topic:
self.assertTrue(isinstance(v, six.string_types))
self.assertTrue(isinstance(k, int))
def testTransform(self):
texts_new = ['graph','eulerian']
bow = self.model.id2word.doc2bow(texts_new)
X = self.model.transform(bow)
self.assertTrue(X.shape[0], 1)
self.assertTrue(X.shape[1], self.model.num_topics)
texts_new = [['graph','eulerian'],['server', 'flow'], ['path', 'system']]
bow = []
for i in texts_new:
bow.append(self.model.id2word.doc2bow(i))
X = self.model.transform(bow)
self.assertTrue(X.shape[0], 3)
self.assertTrue(X.shape[1], self.model.num_topics)
def testPartialFit(self):
for i in range(10):
self.model.partial_fit(X=corpus) # fit against the model again
doc=list(corpus)[0] # transform only the first document
transformed = self.model[doc]
transformed_approx = matutils.sparse2full(transformed, 2) # better approximation
expected=[1.39, 0.0]
passed = numpy.allclose(sorted(transformed_approx), sorted(expected), atol=1e-1)
self.assertTrue(passed)
def testPipeline(self):
model = SklearnWrapperLsiModel(num_topics=2)
with open(datapath('mini_newsgroup'),'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
data = cache
id2word=Dictionary(map(lambda x : x.split(), data.data))
corpus = [id2word.doc2bow(i.split()) for i in data.data]
clf=linear_model.LogisticRegression(penalty='l2', C=0.1)
text_lda = Pipeline((('features', model,), ('classifier', clf)))
text_lda.fit(corpus, data.target)
score = text_lda.score(corpus, data.target)
self.assertGreater(score, 0.50)
if __name__ == '__main__':
unittest.main()
|
mit
|
airanmehr/bio
|
Scripts/TimeSeriesPaper/Plot/EstimateN.py
|
1
|
3621
|
'''
Copyleft Jan 26, 2017 Arya Iranmehr, PhD Student, Bafna Lab, UC San Diego, Email: [email protected]
'''
import numpy as np;
np.set_printoptions(linewidth=200, precision=5, suppress=True)
import pandas as pd;
pd.options.display.max_rows = 20;
pd.options.display.expand_frame_repr = False
import seaborn as sns
import pylab as plt;
import matplotlib as mpl
import os;
import Utils.Plots as pplt
home = os.path.expanduser('~') + '/'
import Utils.Util as utl
fontSize=6
fig,axes=plt.subplots(2,3,figsize=(7,2.5),dpi=300)
ax=axes[0][0]
sns.set_style("whitegrid", {"grid.color": "0.9", 'axes.linewidth': .5, "grid.linewidth": ".09"})
fontSize=6
A=pd.read_pickle(utl.simoutpath+'EstimateN/likelihoods.df')
a=A[200].dropna();ax=axes[0][0]
sns.tsplot(a.T.unstack().reset_index(),time='level_0',value=0,unit='level_1',ci=95,ax=ax)
ax.set_xlabel('');ax.set_ylabel('')
ax.set_ylabel('log(likelihood)');ax.set_title('(A)')
pplt.setSize(ax,fontSize);pplt.setLegendSize(ax,fontSize);
ax.locator_params(nbins=3)
xlim=ax.get_xlim();ax.set_xticks(np.append([200],ax.get_xticks()));ax.set_xlim(xlim)
a=A[600].dropna();ax=axes[0][1]
sns.tsplot(a.T.unstack().reset_index(),time='level_0',value=0,unit='level_1',ci=95,ax=ax)
ax.set_title('(B)')
ax.set_xlabel('');ax.set_ylabel('')
pplt.setSize(ax,fontSize);pplt.setLegendSize(ax,fontSize);
ax.locator_params(nbins=3)
xlim=ax.get_xlim();ax.set_xticks(np.append([600],ax.get_xticks()));ax.set_xlim(xlim)
a=A[1000].dropna();ax=axes[0][2]
sns.tsplot(a.T.unstack().reset_index(),time='level_0',value=0,unit='level_1',ci=95,ax=ax)
ax.set_title('(C)')
ax.set_xlabel('');ax.set_ylabel('')
ax.locator_params(nbins=3)
xlim=ax.get_xlim();ax.set_xticks(np.append([1000],ax.get_xticks()));ax.set_xlim(xlim)
# print plt.xlim([plt.xlim()[0],plt.xlim()[1]+1.0])
mpl.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']}) ;
#ax.set_xlabel('$\widehat{N}$');ax.set_ylabel('Counts');
pplt.setSize(ax,fontSize);pplt.setLegendSize(ax,fontSize);
# plt.gcf().subplots_adjust(bottom=0.3)
# pplt.savefig('Nsim',300)
ax=axes[1][0]
A=pd.read_pickle('/home/arya/out/real/LikelihoodsN.Chrom.df')
A.plot(ax=ax,legend=False);ax.set_xlabel('$N$');ax.set_label('log(likelihood)');
ax.set_title('(D)')
ax.set_xlabel('');ax.set_ylabel('')
ax.set_xlabel('$\widehat{N}$');ax.set_ylabel('log(likelihood)')
ax.locator_params(nbins=3);xlim=ax.get_xlim();ax.set_xticks(np.append([150,200,250],ax.get_xticks()));ax.set_xlim(xlim)
ax.legend(bbox_to_anchor=(-0.0, -0.05, 1., .102), loc=4,ncol=3,prop={'size': fontSize}, mode="expand", borderaxespad=0.)
pplt.setSize(ax,fontSize);
#pplt.setLegendSize(ax,fontSize);
ax=axes[1][1]
A=pd.read_pickle('/home/arya/out/real/LikelihoodsN.df')
sns.tsplot(A.T.unstack().reset_index(),time='level_0',value=0,unit='level_1',ci=99,ax=ax);ax.locator_params(axis='x',nbins=5)
ax.set_xlabel('');ax.set_ylabel('')
ax.set_xlabel('$\widehat{N}$')
ax.set_title('(E)')
ax.locator_params(nbins=3);xlim=ax.get_xlim();ax.set_xticks(np.append([250],ax.get_xticks()));ax.set_xlim(xlim)
pplt.setSize(ax,fontSize);pplt.setLegendSize(ax,fontSize);
A=pd.read_pickle('/home/arya/storage/Data/Yeast/BurkeYeast/LikelihoodsN.df')
ax=axes[1][2]
sns.tsplot(A.T.unstack().reset_index(),time='level_0',value=0,unit='level_1',ci=99,ax=ax);
ax.locator_params(nbins=3);xlim=ax.get_xlim();ax.set_xticks(np.append([2000],ax.get_xticks()));ax.set_xlim(xlim)
ax.set_xlabel('');ax.set_ylabel('')
ax.set_xlabel('$\widehat{N}$');plt.title('(F)')
pplt.setSize(ax,fontSize);pplt.setLegendSize(ax,fontSize);
plt.tight_layout(pad=0.25)
plt.gcf().subplots_adjust(bottom=0.15)
pplt.savefig('estimateN',300)
plt.show()
|
mit
|
RTHMaK/RPGOne
|
Documents/skflow-master/examples/iris.py
|
6
|
1182
|
# Copyright 2015-present Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sklearn import datasets, metrics, cross_validation
import skflow
# Load dataset.
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = cross_validation.train_test_split(iris.data, iris.target,
test_size=0.2, random_state=42)
# Build 3 layer DNN with 10, 20, 10 units respectively.
classifier = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3, steps=200)
# Fit and predict.
classifier.fit(X_train, y_train)
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print('Accuracy: {0:f}'.format(score))
|
apache-2.0
|
gamahead/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/_mathtext_data.py
|
69
|
57988
|
"""
font data tables for truetype and afm computer modern fonts
"""
# this dict maps symbol names to fontnames, glyphindex. To get the
# glyph index from the character code, you have to use get_charmap
"""
from matplotlib.ft2font import FT2Font
font = FT2Font('/usr/local/share/matplotlib/cmr10.ttf')
items = font.get_charmap().items()
items.sort()
for charcode, glyphind in items:
print charcode, glyphind
"""
latex_to_bakoma = {
r'\oint' : ('cmex10', 45),
r'\bigodot' : ('cmex10', 50),
r'\bigoplus' : ('cmex10', 55),
r'\bigotimes' : ('cmex10', 59),
r'\sum' : ('cmex10', 51),
r'\prod' : ('cmex10', 24),
r'\int' : ('cmex10', 56),
r'\bigcup' : ('cmex10', 28),
r'\bigcap' : ('cmex10', 60),
r'\biguplus' : ('cmex10', 32),
r'\bigwedge' : ('cmex10', 4),
r'\bigvee' : ('cmex10', 37),
r'\coprod' : ('cmex10', 42),
r'\__sqrt__' : ('cmex10', 48),
r'\leftbrace' : ('cmex10', 92),
r'{' : ('cmex10', 92),
r'\{' : ('cmex10', 92),
r'\rightbrace' : ('cmex10', 130),
r'}' : ('cmex10', 130),
r'\}' : ('cmex10', 130),
r'\leftangle' : ('cmex10', 97),
r'\rightangle' : ('cmex10', 64),
r'\langle' : ('cmex10', 97),
r'\rangle' : ('cmex10', 64),
r'\widehat' : ('cmex10', 15),
r'\widetilde' : ('cmex10', 52),
r'\omega' : ('cmmi10', 29),
r'\varepsilon' : ('cmmi10', 20),
r'\vartheta' : ('cmmi10', 22),
r'\varrho' : ('cmmi10', 61),
r'\varsigma' : ('cmmi10', 41),
r'\varphi' : ('cmmi10', 6),
r'\leftharpoonup' : ('cmmi10', 108),
r'\leftharpoondown' : ('cmmi10', 68),
r'\rightharpoonup' : ('cmmi10', 117),
r'\rightharpoondown' : ('cmmi10', 77),
r'\triangleright' : ('cmmi10', 130),
r'\triangleleft' : ('cmmi10', 89),
r'.' : ('cmmi10', 51),
r',' : ('cmmi10', 44),
r'<' : ('cmmi10', 99),
r'/' : ('cmmi10', 98),
r'>' : ('cmmi10', 107),
r'\flat' : ('cmmi10', 131),
r'\natural' : ('cmmi10', 90),
r'\sharp' : ('cmmi10', 50),
r'\smile' : ('cmmi10', 97),
r'\frown' : ('cmmi10', 58),
r'\ell' : ('cmmi10', 102),
r'\imath' : ('cmmi10', 8),
r'\jmath' : ('cmmi10', 65),
r'\wp' : ('cmmi10', 14),
r'\alpha' : ('cmmi10', 13),
r'\beta' : ('cmmi10', 35),
r'\gamma' : ('cmmi10', 24),
r'\delta' : ('cmmi10', 38),
r'\epsilon' : ('cmmi10', 54),
r'\zeta' : ('cmmi10', 10),
r'\eta' : ('cmmi10', 5),
r'\theta' : ('cmmi10', 18),
r'\iota' : ('cmmi10', 28),
r'\lambda' : ('cmmi10', 9),
r'\mu' : ('cmmi10', 32),
r'\nu' : ('cmmi10', 34),
r'\xi' : ('cmmi10', 7),
r'\pi' : ('cmmi10', 36),
r'\kappa' : ('cmmi10', 30),
r'\rho' : ('cmmi10', 39),
r'\sigma' : ('cmmi10', 21),
r'\tau' : ('cmmi10', 43),
r'\upsilon' : ('cmmi10', 25),
r'\phi' : ('cmmi10', 42),
r'\chi' : ('cmmi10', 17),
r'\psi' : ('cmmi10', 31),
r'|' : ('cmsy10', 47),
r'\|' : ('cmsy10', 47),
r'(' : ('cmr10', 119),
r'\leftparen' : ('cmr10', 119),
r'\rightparen' : ('cmr10', 68),
r')' : ('cmr10', 68),
r'+' : ('cmr10', 76),
r'0' : ('cmr10', 40),
r'1' : ('cmr10', 100),
r'2' : ('cmr10', 49),
r'3' : ('cmr10', 110),
r'4' : ('cmr10', 59),
r'5' : ('cmr10', 120),
r'6' : ('cmr10', 69),
r'7' : ('cmr10', 127),
r'8' : ('cmr10', 77),
r'9' : ('cmr10', 22),
r':' : ('cmr10', 85),
r';' : ('cmr10', 31),
r'=' : ('cmr10', 41),
r'\leftbracket' : ('cmr10', 62),
r'[' : ('cmr10', 62),
r'\rightbracket' : ('cmr10', 72),
r']' : ('cmr10', 72),
r'\%' : ('cmr10', 48),
r'%' : ('cmr10', 48),
r'\$' : ('cmr10', 99),
r'@' : ('cmr10', 111),
r'\_' : ('cmtt10', 79),
r'\Gamma' : ('cmr10', 19),
r'\Delta' : ('cmr10', 6),
r'\Theta' : ('cmr10', 7),
r'\Lambda' : ('cmr10', 14),
r'\Xi' : ('cmr10', 3),
r'\Pi' : ('cmr10', 17),
r'\Sigma' : ('cmr10', 10),
r'\Upsilon' : ('cmr10', 11),
r'\Phi' : ('cmr10', 9),
r'\Psi' : ('cmr10', 15),
r'\Omega' : ('cmr10', 12),
# these are mathml names, I think. I'm just using them for the
# tex methods noted
r'\circumflexaccent' : ('cmr10', 124), # for \hat
r'\combiningbreve' : ('cmr10', 81), # for \breve
r'\combiningoverline' : ('cmr10', 131), # for \bar
r'\combininggraveaccent' : ('cmr10', 114), # for \grave
r'\combiningacuteaccent' : ('cmr10', 63), # for \acute
r'\combiningdiaeresis' : ('cmr10', 91), # for \ddot
r'\combiningtilde' : ('cmr10', 75), # for \tilde
r'\combiningrightarrowabove' : ('cmmi10', 110), # for \vec
r'\combiningdotabove' : ('cmr10', 26), # for \dot
r'\leftarrow' : ('cmsy10', 10),
r'\uparrow' : ('cmsy10', 25),
r'\downarrow' : ('cmsy10', 28),
r'\leftrightarrow' : ('cmsy10', 24),
r'\nearrow' : ('cmsy10', 99),
r'\searrow' : ('cmsy10', 57),
r'\simeq' : ('cmsy10', 108),
r'\Leftarrow' : ('cmsy10', 104),
r'\Rightarrow' : ('cmsy10', 112),
r'\Uparrow' : ('cmsy10', 60),
r'\Downarrow' : ('cmsy10', 68),
r'\Leftrightarrow' : ('cmsy10', 51),
r'\nwarrow' : ('cmsy10', 65),
r'\swarrow' : ('cmsy10', 116),
r'\propto' : ('cmsy10', 15),
r'\prime' : ('cmsy10', 73),
r"'" : ('cmsy10', 73),
r'\infty' : ('cmsy10', 32),
r'\in' : ('cmsy10', 59),
r'\ni' : ('cmsy10', 122),
r'\bigtriangleup' : ('cmsy10', 80),
r'\bigtriangledown' : ('cmsy10', 132),
r'\slash' : ('cmsy10', 87),
r'\forall' : ('cmsy10', 21),
r'\exists' : ('cmsy10', 5),
r'\neg' : ('cmsy10', 20),
r'\emptyset' : ('cmsy10', 33),
r'\Re' : ('cmsy10', 95),
r'\Im' : ('cmsy10', 52),
r'\top' : ('cmsy10', 100),
r'\bot' : ('cmsy10', 11),
r'\aleph' : ('cmsy10', 26),
r'\cup' : ('cmsy10', 6),
r'\cap' : ('cmsy10', 19),
r'\uplus' : ('cmsy10', 58),
r'\wedge' : ('cmsy10', 43),
r'\vee' : ('cmsy10', 96),
r'\vdash' : ('cmsy10', 109),
r'\dashv' : ('cmsy10', 66),
r'\lfloor' : ('cmsy10', 117),
r'\rfloor' : ('cmsy10', 74),
r'\lceil' : ('cmsy10', 123),
r'\rceil' : ('cmsy10', 81),
r'\lbrace' : ('cmsy10', 92),
r'\rbrace' : ('cmsy10', 105),
r'\mid' : ('cmsy10', 47),
r'\vert' : ('cmsy10', 47),
r'\Vert' : ('cmsy10', 44),
r'\updownarrow' : ('cmsy10', 94),
r'\Updownarrow' : ('cmsy10', 53),
r'\backslash' : ('cmsy10', 126),
r'\wr' : ('cmsy10', 101),
r'\nabla' : ('cmsy10', 110),
r'\sqcup' : ('cmsy10', 67),
r'\sqcap' : ('cmsy10', 118),
r'\sqsubseteq' : ('cmsy10', 75),
r'\sqsupseteq' : ('cmsy10', 124),
r'\S' : ('cmsy10', 129),
r'\dag' : ('cmsy10', 71),
r'\ddag' : ('cmsy10', 127),
r'\P' : ('cmsy10', 130),
r'\clubsuit' : ('cmsy10', 18),
r'\diamondsuit' : ('cmsy10', 34),
r'\heartsuit' : ('cmsy10', 22),
r'-' : ('cmsy10', 17),
r'\cdot' : ('cmsy10', 78),
r'\times' : ('cmsy10', 13),
r'*' : ('cmsy10', 9),
r'\ast' : ('cmsy10', 9),
r'\div' : ('cmsy10', 31),
r'\diamond' : ('cmsy10', 48),
r'\pm' : ('cmsy10', 8),
r'\mp' : ('cmsy10', 98),
r'\oplus' : ('cmsy10', 16),
r'\ominus' : ('cmsy10', 56),
r'\otimes' : ('cmsy10', 30),
r'\oslash' : ('cmsy10', 107),
r'\odot' : ('cmsy10', 64),
r'\bigcirc' : ('cmsy10', 115),
r'\circ' : ('cmsy10', 72),
r'\bullet' : ('cmsy10', 84),
r'\asymp' : ('cmsy10', 121),
r'\equiv' : ('cmsy10', 35),
r'\subseteq' : ('cmsy10', 103),
r'\supseteq' : ('cmsy10', 42),
r'\leq' : ('cmsy10', 14),
r'\geq' : ('cmsy10', 29),
r'\preceq' : ('cmsy10', 79),
r'\succeq' : ('cmsy10', 131),
r'\sim' : ('cmsy10', 27),
r'\approx' : ('cmsy10', 23),
r'\subset' : ('cmsy10', 50),
r'\supset' : ('cmsy10', 86),
r'\ll' : ('cmsy10', 85),
r'\gg' : ('cmsy10', 40),
r'\prec' : ('cmsy10', 93),
r'\succ' : ('cmsy10', 49),
r'\rightarrow' : ('cmsy10', 12),
r'\to' : ('cmsy10', 12),
r'\spadesuit' : ('cmsy10', 7),
}
latex_to_cmex = {
r'\__sqrt__' : 112,
r'\bigcap' : 92,
r'\bigcup' : 91,
r'\bigodot' : 75,
r'\bigoplus' : 77,
r'\bigotimes' : 79,
r'\biguplus' : 93,
r'\bigvee' : 95,
r'\bigwedge' : 94,
r'\coprod' : 97,
r'\int' : 90,
r'\leftangle' : 173,
r'\leftbrace' : 169,
r'\oint' : 73,
r'\prod' : 89,
r'\rightangle' : 174,
r'\rightbrace' : 170,
r'\sum' : 88,
r'\widehat' : 98,
r'\widetilde' : 101,
}
latex_to_standard = {
r'\cong' : ('psyr', 64),
r'\Delta' : ('psyr', 68),
r'\Phi' : ('psyr', 70),
r'\Gamma' : ('psyr', 89),
r'\alpha' : ('psyr', 97),
r'\beta' : ('psyr', 98),
r'\chi' : ('psyr', 99),
r'\delta' : ('psyr', 100),
r'\varepsilon' : ('psyr', 101),
r'\phi' : ('psyr', 102),
r'\gamma' : ('psyr', 103),
r'\eta' : ('psyr', 104),
r'\iota' : ('psyr', 105),
r'\varpsi' : ('psyr', 106),
r'\kappa' : ('psyr', 108),
r'\nu' : ('psyr', 110),
r'\pi' : ('psyr', 112),
r'\theta' : ('psyr', 113),
r'\rho' : ('psyr', 114),
r'\sigma' : ('psyr', 115),
r'\tau' : ('psyr', 116),
r'\upsilon' : ('psyr', 117),
r'\varpi' : ('psyr', 118),
r'\omega' : ('psyr', 119),
r'\xi' : ('psyr', 120),
r'\psi' : ('psyr', 121),
r'\zeta' : ('psyr', 122),
r'\sim' : ('psyr', 126),
r'\leq' : ('psyr', 163),
r'\infty' : ('psyr', 165),
r'\clubsuit' : ('psyr', 167),
r'\diamondsuit' : ('psyr', 168),
r'\heartsuit' : ('psyr', 169),
r'\spadesuit' : ('psyr', 170),
r'\leftrightarrow' : ('psyr', 171),
r'\leftarrow' : ('psyr', 172),
r'\uparrow' : ('psyr', 173),
r'\rightarrow' : ('psyr', 174),
r'\downarrow' : ('psyr', 175),
r'\pm' : ('psyr', 176),
r'\geq' : ('psyr', 179),
r'\times' : ('psyr', 180),
r'\propto' : ('psyr', 181),
r'\partial' : ('psyr', 182),
r'\bullet' : ('psyr', 183),
r'\div' : ('psyr', 184),
r'\neq' : ('psyr', 185),
r'\equiv' : ('psyr', 186),
r'\approx' : ('psyr', 187),
r'\ldots' : ('psyr', 188),
r'\aleph' : ('psyr', 192),
r'\Im' : ('psyr', 193),
r'\Re' : ('psyr', 194),
r'\wp' : ('psyr', 195),
r'\otimes' : ('psyr', 196),
r'\oplus' : ('psyr', 197),
r'\oslash' : ('psyr', 198),
r'\cap' : ('psyr', 199),
r'\cup' : ('psyr', 200),
r'\supset' : ('psyr', 201),
r'\supseteq' : ('psyr', 202),
r'\subset' : ('psyr', 204),
r'\subseteq' : ('psyr', 205),
r'\in' : ('psyr', 206),
r'\notin' : ('psyr', 207),
r'\angle' : ('psyr', 208),
r'\nabla' : ('psyr', 209),
r'\textregistered' : ('psyr', 210),
r'\copyright' : ('psyr', 211),
r'\texttrademark' : ('psyr', 212),
r'\Pi' : ('psyr', 213),
r'\prod' : ('psyr', 213),
r'\surd' : ('psyr', 214),
r'\__sqrt__' : ('psyr', 214),
r'\cdot' : ('psyr', 215),
r'\urcorner' : ('psyr', 216),
r'\vee' : ('psyr', 217),
r'\wedge' : ('psyr', 218),
r'\Leftrightarrow' : ('psyr', 219),
r'\Leftarrow' : ('psyr', 220),
r'\Uparrow' : ('psyr', 221),
r'\Rightarrow' : ('psyr', 222),
r'\Downarrow' : ('psyr', 223),
r'\Diamond' : ('psyr', 224),
r'\langle' : ('psyr', 225),
r'\Sigma' : ('psyr', 229),
r'\sum' : ('psyr', 229),
r'\forall' : ('psyr', 34),
r'\exists' : ('psyr', 36),
r'\lceil' : ('psyr', 233),
r'\lbrace' : ('psyr', 123),
r'\Psi' : ('psyr', 89),
r'\bot' : ('psyr', 0136),
r'\Omega' : ('psyr', 0127),
r'\leftbracket' : ('psyr', 0133),
r'\rightbracket' : ('psyr', 0135),
r'\leftbrace' : ('psyr', 123),
r'\leftparen' : ('psyr', 050),
r'\prime' : ('psyr', 0242),
r'\sharp' : ('psyr', 043),
r'\slash' : ('psyr', 057),
r'\Lamda' : ('psyr', 0114),
r'\neg' : ('psyr', 0330),
r'\Upsilon' : ('psyr', 0241),
r'\rightbrace' : ('psyr', 0175),
r'\rfloor' : ('psyr', 0373),
r'\lambda' : ('psyr', 0154),
r'\to' : ('psyr', 0256),
r'\Xi' : ('psyr', 0130),
r'\emptyset' : ('psyr', 0306),
r'\lfloor' : ('psyr', 0353),
r'\rightparen' : ('psyr', 051),
r'\rceil' : ('psyr', 0371),
r'\ni' : ('psyr', 047),
r'\epsilon' : ('psyr', 0145),
r'\Theta' : ('psyr', 0121),
r'\langle' : ('psyr', 0341),
r'\leftangle' : ('psyr', 0341),
r'\rangle' : ('psyr', 0361),
r'\rightangle' : ('psyr', 0361),
r'\rbrace' : ('psyr', 0175),
r'\circ' : ('psyr', 0260),
r'\diamond' : ('psyr', 0340),
r'\mu' : ('psyr', 0155),
r'\mid' : ('psyr', 0352),
r'\imath' : ('pncri8a', 105),
r'\%' : ('pncr8a', 37),
r'\$' : ('pncr8a', 36),
r'\{' : ('pncr8a', 123),
r'\}' : ('pncr8a', 125),
r'\backslash' : ('pncr8a', 92),
r'\ast' : ('pncr8a', 42),
r'\circumflexaccent' : ('pncri8a', 124), # for \hat
r'\combiningbreve' : ('pncri8a', 81), # for \breve
r'\combininggraveaccent' : ('pncri8a', 114), # for \grave
r'\combiningacuteaccent' : ('pncri8a', 63), # for \acute
r'\combiningdiaeresis' : ('pncri8a', 91), # for \ddot
r'\combiningtilde' : ('pncri8a', 75), # for \tilde
r'\combiningrightarrowabove' : ('pncri8a', 110), # for \vec
r'\combiningdotabove' : ('pncri8a', 26), # for \dot
}
# Automatically generated.
type12uni = {'uni24C8': 9416,
'aring': 229,
'uni22A0': 8864,
'uni2292': 8850,
'quotedblright': 8221,
'uni03D2': 978,
'uni2215': 8725,
'uni03D0': 976,
'V': 86,
'dollar': 36,
'uni301E': 12318,
'uni03D5': 981,
'four': 52,
'uni25A0': 9632,
'uni013C': 316,
'uni013B': 315,
'uni013E': 318,
'Yacute': 221,
'uni25DE': 9694,
'uni013F': 319,
'uni255A': 9562,
'uni2606': 9734,
'uni0180': 384,
'uni22B7': 8887,
'uni044F': 1103,
'uni22B5': 8885,
'uni22B4': 8884,
'uni22AE': 8878,
'uni22B2': 8882,
'uni22B1': 8881,
'uni22B0': 8880,
'uni25CD': 9677,
'uni03CE': 974,
'uni03CD': 973,
'uni03CC': 972,
'uni03CB': 971,
'uni03CA': 970,
'uni22B8': 8888,
'uni22C9': 8905,
'uni0449': 1097,
'uni20DD': 8413,
'uni20DC': 8412,
'uni20DB': 8411,
'uni2231': 8753,
'uni25CF': 9679,
'uni306E': 12398,
'uni03D1': 977,
'uni01A1': 417,
'uni20D7': 8407,
'uni03D6': 982,
'uni2233': 8755,
'uni20D2': 8402,
'uni20D1': 8401,
'uni20D0': 8400,
'P': 80,
'uni22BE': 8894,
'uni22BD': 8893,
'uni22BC': 8892,
'uni22BB': 8891,
'underscore': 95,
'uni03C8': 968,
'uni03C7': 967,
'uni0328': 808,
'uni03C5': 965,
'uni03C4': 964,
'uni03C3': 963,
'uni03C2': 962,
'uni03C1': 961,
'uni03C0': 960,
'uni2010': 8208,
'uni0130': 304,
'uni0133': 307,
'uni0132': 306,
'uni0135': 309,
'uni0134': 308,
'uni0137': 311,
'uni0136': 310,
'uni0139': 313,
'uni0138': 312,
'uni2244': 8772,
'uni229A': 8858,
'uni2571': 9585,
'uni0278': 632,
'uni2239': 8761,
'p': 112,
'uni3019': 12313,
'uni25CB': 9675,
'uni03DB': 987,
'uni03DC': 988,
'uni03DA': 986,
'uni03DF': 991,
'uni03DD': 989,
'uni013D': 317,
'uni220A': 8714,
'uni220C': 8716,
'uni220B': 8715,
'uni220E': 8718,
'uni220D': 8717,
'uni220F': 8719,
'uni22CC': 8908,
'Otilde': 213,
'uni25E5': 9701,
'uni2736': 10038,
'perthousand': 8240,
'zero': 48,
'uni279B': 10139,
'dotlessi': 305,
'uni2279': 8825,
'Scaron': 352,
'zcaron': 382,
'uni21D8': 8664,
'egrave': 232,
'uni0271': 625,
'uni01AA': 426,
'uni2332': 9010,
'section': 167,
'uni25E4': 9700,
'Icircumflex': 206,
'ntilde': 241,
'uni041E': 1054,
'ampersand': 38,
'uni041C': 1052,
'uni041A': 1050,
'uni22AB': 8875,
'uni21DB': 8667,
'dotaccent': 729,
'uni0416': 1046,
'uni0417': 1047,
'uni0414': 1044,
'uni0415': 1045,
'uni0412': 1042,
'uni0413': 1043,
'degree': 176,
'uni0411': 1041,
'K': 75,
'uni25EB': 9707,
'uni25EF': 9711,
'uni0418': 1048,
'uni0419': 1049,
'uni2263': 8803,
'uni226E': 8814,
'uni2251': 8785,
'uni02C8': 712,
'uni2262': 8802,
'acircumflex': 226,
'uni22B3': 8883,
'uni2261': 8801,
'uni2394': 9108,
'Aring': 197,
'uni2260': 8800,
'uni2254': 8788,
'uni0436': 1078,
'uni2267': 8807,
'k': 107,
'uni22C8': 8904,
'uni226A': 8810,
'uni231F': 8991,
'smalltilde': 732,
'uni2201': 8705,
'uni2200': 8704,
'uni2203': 8707,
'uni02BD': 701,
'uni2205': 8709,
'uni2204': 8708,
'Agrave': 192,
'uni2206': 8710,
'uni2209': 8713,
'uni2208': 8712,
'uni226D': 8813,
'uni2264': 8804,
'uni263D': 9789,
'uni2258': 8792,
'uni02D3': 723,
'uni02D2': 722,
'uni02D1': 721,
'uni02D0': 720,
'uni25E1': 9697,
'divide': 247,
'uni02D5': 725,
'uni02D4': 724,
'ocircumflex': 244,
'uni2524': 9508,
'uni043A': 1082,
'uni24CC': 9420,
'asciitilde': 126,
'uni22B9': 8889,
'uni24D2': 9426,
'uni211E': 8478,
'uni211D': 8477,
'uni24DD': 9437,
'uni211A': 8474,
'uni211C': 8476,
'uni211B': 8475,
'uni25C6': 9670,
'uni017F': 383,
'uni017A': 378,
'uni017C': 380,
'uni017B': 379,
'uni0346': 838,
'uni22F1': 8945,
'uni22F0': 8944,
'two': 50,
'uni2298': 8856,
'uni24D1': 9425,
'E': 69,
'uni025D': 605,
'scaron': 353,
'uni2322': 8994,
'uni25E3': 9699,
'uni22BF': 8895,
'F': 70,
'uni0440': 1088,
'uni255E': 9566,
'uni22BA': 8890,
'uni0175': 373,
'uni0174': 372,
'uni0177': 375,
'uni0176': 374,
'bracketleft': 91,
'uni0170': 368,
'uni0173': 371,
'uni0172': 370,
'asciicircum': 94,
'uni0179': 377,
'uni2590': 9616,
'uni25E2': 9698,
'uni2119': 8473,
'uni2118': 8472,
'uni25CC': 9676,
'f': 102,
'ordmasculine': 186,
'uni229B': 8859,
'uni22A1': 8865,
'uni2111': 8465,
'uni2110': 8464,
'uni2113': 8467,
'uni2112': 8466,
'mu': 181,
'uni2281': 8833,
'paragraph': 182,
'nine': 57,
'uni25EC': 9708,
'v': 118,
'uni040C': 1036,
'uni0113': 275,
'uni22D0': 8912,
'uni21CC': 8652,
'uni21CB': 8651,
'uni21CA': 8650,
'uni22A5': 8869,
'uni21CF': 8655,
'uni21CE': 8654,
'uni21CD': 8653,
'guilsinglleft': 8249,
'backslash': 92,
'uni2284': 8836,
'uni224E': 8782,
'uni224D': 8781,
'uni224F': 8783,
'uni224A': 8778,
'uni2287': 8839,
'uni224C': 8780,
'uni224B': 8779,
'uni21BD': 8637,
'uni2286': 8838,
'uni030F': 783,
'uni030D': 781,
'uni030E': 782,
'uni030B': 779,
'uni030C': 780,
'uni030A': 778,
'uni026E': 622,
'uni026D': 621,
'six': 54,
'uni026A': 618,
'uni026C': 620,
'uni25C1': 9665,
'uni20D6': 8406,
'uni045B': 1115,
'uni045C': 1116,
'uni256B': 9579,
'uni045A': 1114,
'uni045F': 1119,
'uni045E': 1118,
'A': 65,
'uni2569': 9577,
'uni0458': 1112,
'uni0459': 1113,
'uni0452': 1106,
'uni0453': 1107,
'uni2562': 9570,
'uni0451': 1105,
'uni0456': 1110,
'uni0457': 1111,
'uni0454': 1108,
'uni0455': 1109,
'icircumflex': 238,
'uni0307': 775,
'uni0304': 772,
'uni0305': 773,
'uni0269': 617,
'uni0268': 616,
'uni0300': 768,
'uni0301': 769,
'uni0265': 613,
'uni0264': 612,
'uni0267': 615,
'uni0266': 614,
'uni0261': 609,
'uni0260': 608,
'uni0263': 611,
'uni0262': 610,
'a': 97,
'uni2207': 8711,
'uni2247': 8775,
'uni2246': 8774,
'uni2241': 8769,
'uni2240': 8768,
'uni2243': 8771,
'uni2242': 8770,
'uni2312': 8978,
'ogonek': 731,
'uni2249': 8777,
'uni2248': 8776,
'uni3030': 12336,
'q': 113,
'uni21C2': 8642,
'uni21C1': 8641,
'uni21C0': 8640,
'uni21C7': 8647,
'uni21C6': 8646,
'uni21C5': 8645,
'uni21C4': 8644,
'uni225F': 8799,
'uni212C': 8492,
'uni21C8': 8648,
'uni2467': 9319,
'oacute': 243,
'uni028F': 655,
'uni028E': 654,
'uni026F': 623,
'uni028C': 652,
'uni028B': 651,
'uni028A': 650,
'uni2510': 9488,
'ograve': 242,
'edieresis': 235,
'uni22CE': 8910,
'uni22CF': 8911,
'uni219F': 8607,
'comma': 44,
'uni22CA': 8906,
'uni0429': 1065,
'uni03C6': 966,
'uni0427': 1063,
'uni0426': 1062,
'uni0425': 1061,
'uni0424': 1060,
'uni0423': 1059,
'uni0422': 1058,
'uni0421': 1057,
'uni0420': 1056,
'uni2465': 9317,
'uni24D0': 9424,
'uni2464': 9316,
'uni0430': 1072,
'otilde': 245,
'uni2661': 9825,
'uni24D6': 9430,
'uni2466': 9318,
'uni24D5': 9429,
'uni219A': 8602,
'uni2518': 9496,
'uni22B6': 8886,
'uni2461': 9313,
'uni24D4': 9428,
'uni2460': 9312,
'uni24EA': 9450,
'guillemotright': 187,
'ecircumflex': 234,
'greater': 62,
'uni2011': 8209,
'uacute': 250,
'uni2462': 9314,
'L': 76,
'bullet': 8226,
'uni02A4': 676,
'uni02A7': 679,
'cedilla': 184,
'uni02A2': 674,
'uni2015': 8213,
'uni22C4': 8900,
'uni22C5': 8901,
'uni22AD': 8877,
'uni22C7': 8903,
'uni22C0': 8896,
'uni2016': 8214,
'uni22C2': 8898,
'uni22C3': 8899,
'uni24CF': 9423,
'uni042F': 1071,
'uni042E': 1070,
'uni042D': 1069,
'ydieresis': 255,
'l': 108,
'logicalnot': 172,
'uni24CA': 9418,
'uni0287': 647,
'uni0286': 646,
'uni0285': 645,
'uni0284': 644,
'uni0283': 643,
'uni0282': 642,
'uni0281': 641,
'uni027C': 636,
'uni2664': 9828,
'exclamdown': 161,
'uni25C4': 9668,
'uni0289': 649,
'uni0288': 648,
'uni039A': 922,
'endash': 8211,
'uni2640': 9792,
'uni20E4': 8420,
'uni0473': 1139,
'uni20E1': 8417,
'uni2642': 9794,
'uni03B8': 952,
'uni03B9': 953,
'agrave': 224,
'uni03B4': 948,
'uni03B5': 949,
'uni03B6': 950,
'uni03B7': 951,
'uni03B0': 944,
'uni03B1': 945,
'uni03B2': 946,
'uni03B3': 947,
'uni2555': 9557,
'Adieresis': 196,
'germandbls': 223,
'Odieresis': 214,
'space': 32,
'uni0126': 294,
'uni0127': 295,
'uni0124': 292,
'uni0125': 293,
'uni0122': 290,
'uni0123': 291,
'uni0120': 288,
'uni0121': 289,
'quoteright': 8217,
'uni2560': 9568,
'uni2556': 9558,
'ucircumflex': 251,
'uni2561': 9569,
'uni2551': 9553,
'uni25B2': 9650,
'uni2550': 9552,
'uni2563': 9571,
'uni2553': 9555,
'G': 71,
'uni2564': 9572,
'uni2552': 9554,
'quoteleft': 8216,
'uni2565': 9573,
'uni2572': 9586,
'uni2568': 9576,
'uni2566': 9574,
'W': 87,
'uni214A': 8522,
'uni012F': 303,
'uni012D': 301,
'uni012E': 302,
'uni012B': 299,
'uni012C': 300,
'uni255C': 9564,
'uni012A': 298,
'uni2289': 8841,
'Q': 81,
'uni2320': 8992,
'uni2321': 8993,
'g': 103,
'uni03BD': 957,
'uni03BE': 958,
'uni03BF': 959,
'uni2282': 8834,
'uni2285': 8837,
'uni03BA': 954,
'uni03BB': 955,
'uni03BC': 956,
'uni2128': 8488,
'uni25B7': 9655,
'w': 119,
'uni0302': 770,
'uni03DE': 990,
'uni25DA': 9690,
'uni0303': 771,
'uni0463': 1123,
'uni0462': 1122,
'uni3018': 12312,
'uni2514': 9492,
'question': 63,
'uni25B3': 9651,
'uni24E1': 9441,
'one': 49,
'uni200A': 8202,
'uni2278': 8824,
'ring': 730,
'uni0195': 405,
'figuredash': 8210,
'uni22EC': 8940,
'uni0339': 825,
'uni0338': 824,
'uni0337': 823,
'uni0336': 822,
'uni0335': 821,
'uni0333': 819,
'uni0332': 818,
'uni0331': 817,
'uni0330': 816,
'uni01C1': 449,
'uni01C0': 448,
'uni01C3': 451,
'uni01C2': 450,
'uni2353': 9043,
'uni0308': 776,
'uni2218': 8728,
'uni2219': 8729,
'uni2216': 8726,
'uni2217': 8727,
'uni2214': 8724,
'uni0309': 777,
'uni2609': 9737,
'uni2213': 8723,
'uni2210': 8720,
'uni2211': 8721,
'uni2245': 8773,
'B': 66,
'uni25D6': 9686,
'iacute': 237,
'uni02E6': 742,
'uni02E7': 743,
'uni02E8': 744,
'uni02E9': 745,
'uni221D': 8733,
'uni221E': 8734,
'Ydieresis': 376,
'uni221C': 8732,
'uni22D7': 8919,
'uni221A': 8730,
'R': 82,
'uni24DC': 9436,
'uni033F': 831,
'uni033E': 830,
'uni033C': 828,
'uni033B': 827,
'uni033A': 826,
'b': 98,
'uni228A': 8842,
'uni22DB': 8923,
'uni2554': 9556,
'uni046B': 1131,
'uni046A': 1130,
'r': 114,
'uni24DB': 9435,
'Ccedilla': 199,
'minus': 8722,
'uni24DA': 9434,
'uni03F0': 1008,
'uni03F1': 1009,
'uni20AC': 8364,
'uni2276': 8822,
'uni24C0': 9408,
'uni0162': 354,
'uni0163': 355,
'uni011E': 286,
'uni011D': 285,
'uni011C': 284,
'uni011B': 283,
'uni0164': 356,
'uni0165': 357,
'Lslash': 321,
'uni0168': 360,
'uni0169': 361,
'uni25C9': 9673,
'uni02E5': 741,
'uni21C3': 8643,
'uni24C4': 9412,
'uni24E2': 9442,
'uni2277': 8823,
'uni013A': 314,
'uni2102': 8450,
'Uacute': 218,
'uni2317': 8983,
'uni2107': 8455,
'uni221F': 8735,
'yacute': 253,
'uni3012': 12306,
'Ucircumflex': 219,
'uni015D': 349,
'quotedbl': 34,
'uni25D9': 9689,
'uni2280': 8832,
'uni22AF': 8879,
'onehalf': 189,
'uni221B': 8731,
'Thorn': 222,
'uni2226': 8742,
'M': 77,
'uni25BA': 9658,
'uni2463': 9315,
'uni2336': 9014,
'eight': 56,
'uni2236': 8758,
'multiply': 215,
'uni210C': 8460,
'uni210A': 8458,
'uni21C9': 8649,
'grave': 96,
'uni210E': 8462,
'uni0117': 279,
'uni016C': 364,
'uni0115': 277,
'uni016A': 362,
'uni016F': 367,
'uni0112': 274,
'uni016D': 365,
'uni016E': 366,
'Ocircumflex': 212,
'uni2305': 8965,
'm': 109,
'uni24DF': 9439,
'uni0119': 281,
'uni0118': 280,
'uni20A3': 8355,
'uni20A4': 8356,
'uni20A7': 8359,
'uni2288': 8840,
'uni24C3': 9411,
'uni251C': 9500,
'uni228D': 8845,
'uni222F': 8751,
'uni222E': 8750,
'uni222D': 8749,
'uni222C': 8748,
'uni222B': 8747,
'uni222A': 8746,
'uni255B': 9563,
'Ugrave': 217,
'uni24DE': 9438,
'guilsinglright': 8250,
'uni250A': 9482,
'Ntilde': 209,
'uni0279': 633,
'questiondown': 191,
'uni256C': 9580,
'Atilde': 195,
'uni0272': 626,
'uni0273': 627,
'uni0270': 624,
'ccedilla': 231,
'uni0276': 630,
'uni0277': 631,
'uni0274': 628,
'uni0275': 629,
'uni2252': 8786,
'uni041F': 1055,
'uni2250': 8784,
'Z': 90,
'uni2256': 8790,
'uni2257': 8791,
'copyright': 169,
'uni2255': 8789,
'uni043D': 1085,
'uni043E': 1086,
'uni043F': 1087,
'yen': 165,
'uni041D': 1053,
'uni043B': 1083,
'uni043C': 1084,
'uni21B0': 8624,
'uni21B1': 8625,
'uni21B2': 8626,
'uni21B3': 8627,
'uni21B4': 8628,
'uni21B5': 8629,
'uni21B6': 8630,
'uni21B7': 8631,
'uni21B8': 8632,
'Eacute': 201,
'uni2311': 8977,
'uni2310': 8976,
'uni228F': 8847,
'uni25DB': 9691,
'uni21BA': 8634,
'uni21BB': 8635,
'uni21BC': 8636,
'uni2017': 8215,
'uni21BE': 8638,
'uni21BF': 8639,
'uni231C': 8988,
'H': 72,
'uni0293': 659,
'uni2202': 8706,
'uni22A4': 8868,
'uni231E': 8990,
'uni2232': 8754,
'uni225B': 8795,
'uni225C': 8796,
'uni24D9': 9433,
'uni225A': 8794,
'uni0438': 1080,
'uni0439': 1081,
'uni225D': 8797,
'uni225E': 8798,
'uni0434': 1076,
'X': 88,
'uni007F': 127,
'uni0437': 1079,
'Idieresis': 207,
'uni0431': 1073,
'uni0432': 1074,
'uni0433': 1075,
'uni22AC': 8876,
'uni22CD': 8909,
'uni25A3': 9635,
'bar': 124,
'uni24BB': 9403,
'uni037E': 894,
'uni027B': 635,
'h': 104,
'uni027A': 634,
'uni027F': 639,
'uni027D': 637,
'uni027E': 638,
'uni2227': 8743,
'uni2004': 8196,
'uni2225': 8741,
'uni2224': 8740,
'uni2223': 8739,
'uni2222': 8738,
'uni2221': 8737,
'uni2220': 8736,
'x': 120,
'uni2323': 8995,
'uni2559': 9561,
'uni2558': 9560,
'uni2229': 8745,
'uni2228': 8744,
'udieresis': 252,
'uni029D': 669,
'ordfeminine': 170,
'uni22CB': 8907,
'uni233D': 9021,
'uni0428': 1064,
'uni24C6': 9414,
'uni22DD': 8925,
'uni24C7': 9415,
'uni015C': 348,
'uni015B': 347,
'uni015A': 346,
'uni22AA': 8874,
'uni015F': 351,
'uni015E': 350,
'braceleft': 123,
'uni24C5': 9413,
'uni0410': 1040,
'uni03AA': 938,
'uni24C2': 9410,
'uni03AC': 940,
'uni03AB': 939,
'macron': 175,
'uni03AD': 941,
'uni03AF': 943,
'uni0294': 660,
'uni0295': 661,
'uni0296': 662,
'uni0297': 663,
'uni0290': 656,
'uni0291': 657,
'uni0292': 658,
'atilde': 227,
'Acircumflex': 194,
'uni2370': 9072,
'uni24C1': 9409,
'uni0298': 664,
'uni0299': 665,
'Oslash': 216,
'uni029E': 670,
'C': 67,
'quotedblleft': 8220,
'uni029B': 667,
'uni029C': 668,
'uni03A9': 937,
'uni03A8': 936,
'S': 83,
'uni24C9': 9417,
'uni03A1': 929,
'uni03A0': 928,
'exclam': 33,
'uni03A5': 933,
'uni03A4': 932,
'uni03A7': 935,
'Zcaron': 381,
'uni2133': 8499,
'uni2132': 8498,
'uni0159': 345,
'uni0158': 344,
'uni2137': 8503,
'uni2005': 8197,
'uni2135': 8501,
'uni2134': 8500,
'uni02BA': 698,
'uni2033': 8243,
'uni0151': 337,
'uni0150': 336,
'uni0157': 343,
'equal': 61,
'uni0155': 341,
'uni0154': 340,
's': 115,
'uni233F': 9023,
'eth': 240,
'uni24BE': 9406,
'uni21E9': 8681,
'uni2060': 8288,
'Egrave': 200,
'uni255D': 9565,
'uni24CD': 9421,
'uni21E1': 8673,
'uni21B9': 8633,
'hyphen': 45,
'uni01BE': 446,
'uni01BB': 443,
'period': 46,
'igrave': 236,
'uni01BA': 442,
'uni2296': 8854,
'uni2297': 8855,
'uni2294': 8852,
'uni2295': 8853,
'colon': 58,
'uni2293': 8851,
'uni2290': 8848,
'uni2291': 8849,
'uni032D': 813,
'uni032E': 814,
'uni032F': 815,
'uni032A': 810,
'uni032B': 811,
'uni032C': 812,
'uni231D': 8989,
'Ecircumflex': 202,
'uni24D7': 9431,
'uni25DD': 9693,
'trademark': 8482,
'Aacute': 193,
'cent': 162,
'uni0445': 1093,
'uni266E': 9838,
'uni266D': 9837,
'uni266B': 9835,
'uni03C9': 969,
'uni2003': 8195,
'uni2047': 8263,
'lslash': 322,
'uni03A6': 934,
'uni2043': 8259,
'uni250C': 9484,
'uni2040': 8256,
'uni255F': 9567,
'uni24CB': 9419,
'uni0472': 1138,
'uni0446': 1094,
'uni0474': 1140,
'uni0475': 1141,
'uni2508': 9480,
'uni2660': 9824,
'uni2506': 9478,
'uni2502': 9474,
'c': 99,
'uni2500': 9472,
'N': 78,
'uni22A6': 8870,
'uni21E7': 8679,
'uni2130': 8496,
'uni2002': 8194,
'breve': 728,
'uni0442': 1090,
'Oacute': 211,
'uni229F': 8863,
'uni25C7': 9671,
'uni229D': 8861,
'uni229E': 8862,
'guillemotleft': 171,
'uni0329': 809,
'uni24E5': 9445,
'uni011F': 287,
'uni0324': 804,
'uni0325': 805,
'uni0326': 806,
'uni0327': 807,
'uni0321': 801,
'uni0322': 802,
'n': 110,
'uni2032': 8242,
'uni2269': 8809,
'uni2268': 8808,
'uni0306': 774,
'uni226B': 8811,
'uni21EA': 8682,
'uni0166': 358,
'uni203B': 8251,
'uni01B5': 437,
'idieresis': 239,
'uni02BC': 700,
'uni01B0': 432,
'braceright': 125,
'seven': 55,
'uni02BB': 699,
'uni011A': 282,
'uni29FB': 10747,
'brokenbar': 166,
'uni2036': 8246,
'uni25C0': 9664,
'uni0156': 342,
'uni22D5': 8917,
'uni0258': 600,
'ugrave': 249,
'uni22D6': 8918,
'uni22D1': 8913,
'uni2034': 8244,
'uni22D3': 8915,
'uni22D2': 8914,
'uni203C': 8252,
'uni223E': 8766,
'uni02BF': 703,
'uni22D9': 8921,
'uni22D8': 8920,
'uni25BD': 9661,
'uni25BE': 9662,
'uni25BF': 9663,
'uni041B': 1051,
'periodcentered': 183,
'uni25BC': 9660,
'uni019E': 414,
'uni019B': 411,
'uni019A': 410,
'uni2007': 8199,
'uni0391': 913,
'uni0390': 912,
'uni0393': 915,
'uni0392': 914,
'uni0395': 917,
'uni0394': 916,
'uni0397': 919,
'uni0396': 918,
'uni0399': 921,
'uni0398': 920,
'uni25C8': 9672,
'uni2468': 9320,
'sterling': 163,
'uni22EB': 8939,
'uni039C': 924,
'uni039B': 923,
'uni039E': 926,
'uni039D': 925,
'uni039F': 927,
'I': 73,
'uni03E1': 993,
'uni03E0': 992,
'uni2319': 8985,
'uni228B': 8843,
'uni25B5': 9653,
'uni25B6': 9654,
'uni22EA': 8938,
'uni24B9': 9401,
'uni044E': 1102,
'uni0199': 409,
'uni2266': 8806,
'Y': 89,
'uni22A2': 8866,
'Eth': 208,
'uni266F': 9839,
'emdash': 8212,
'uni263B': 9787,
'uni24BD': 9405,
'uni22DE': 8926,
'uni0360': 864,
'uni2557': 9559,
'uni22DF': 8927,
'uni22DA': 8922,
'uni22DC': 8924,
'uni0361': 865,
'i': 105,
'uni24BF': 9407,
'uni0362': 866,
'uni263E': 9790,
'uni028D': 653,
'uni2259': 8793,
'uni0323': 803,
'uni2265': 8805,
'daggerdbl': 8225,
'y': 121,
'uni010A': 266,
'plusminus': 177,
'less': 60,
'uni21AE': 8622,
'uni0315': 789,
'uni230B': 8971,
'uni21AF': 8623,
'uni21AA': 8618,
'uni21AC': 8620,
'uni21AB': 8619,
'uni01FB': 507,
'uni01FC': 508,
'uni223A': 8762,
'uni01FA': 506,
'uni01FF': 511,
'uni01FD': 509,
'uni01FE': 510,
'uni2567': 9575,
'uni25E0': 9696,
'uni0104': 260,
'uni0105': 261,
'uni0106': 262,
'uni0107': 263,
'uni0100': 256,
'uni0101': 257,
'uni0102': 258,
'uni0103': 259,
'uni2038': 8248,
'uni2009': 8201,
'uni2008': 8200,
'uni0108': 264,
'uni0109': 265,
'uni02A1': 673,
'uni223B': 8763,
'uni226C': 8812,
'uni25AC': 9644,
'uni24D3': 9427,
'uni21E0': 8672,
'uni21E3': 8675,
'Udieresis': 220,
'uni21E2': 8674,
'D': 68,
'uni21E5': 8677,
'uni2621': 9761,
'uni21D1': 8657,
'uni203E': 8254,
'uni22C6': 8902,
'uni21E4': 8676,
'uni010D': 269,
'uni010E': 270,
'uni010F': 271,
'five': 53,
'T': 84,
'uni010B': 267,
'uni010C': 268,
'uni2605': 9733,
'uni2663': 9827,
'uni21E6': 8678,
'uni24B6': 9398,
'uni22C1': 8897,
'oslash': 248,
'acute': 180,
'uni01F0': 496,
'd': 100,
'OE': 338,
'uni22E3': 8931,
'Igrave': 204,
'uni2308': 8968,
'uni2309': 8969,
'uni21A9': 8617,
't': 116,
'uni2313': 8979,
'uni03A3': 931,
'uni21A4': 8612,
'uni21A7': 8615,
'uni21A6': 8614,
'uni21A1': 8609,
'uni21A0': 8608,
'uni21A3': 8611,
'uni21A2': 8610,
'parenright': 41,
'uni256A': 9578,
'uni25DC': 9692,
'uni24CE': 9422,
'uni042C': 1068,
'uni24E0': 9440,
'uni042B': 1067,
'uni0409': 1033,
'uni0408': 1032,
'uni24E7': 9447,
'uni25B4': 9652,
'uni042A': 1066,
'uni228E': 8846,
'uni0401': 1025,
'adieresis': 228,
'uni0403': 1027,
'quotesingle': 39,
'uni0405': 1029,
'uni0404': 1028,
'uni0407': 1031,
'uni0406': 1030,
'uni229C': 8860,
'uni2306': 8966,
'uni2253': 8787,
'twodotenleader': 8229,
'uni2131': 8497,
'uni21DA': 8666,
'uni2234': 8756,
'uni2235': 8757,
'uni01A5': 421,
'uni2237': 8759,
'uni2230': 8752,
'uni02CC': 716,
'slash': 47,
'uni01A0': 416,
'ellipsis': 8230,
'uni2299': 8857,
'uni2238': 8760,
'numbersign': 35,
'uni21A8': 8616,
'uni223D': 8765,
'uni01AF': 431,
'uni223F': 8767,
'uni01AD': 429,
'uni01AB': 427,
'odieresis': 246,
'uni223C': 8764,
'uni227D': 8829,
'uni0280': 640,
'O': 79,
'uni227E': 8830,
'uni21A5': 8613,
'uni22D4': 8916,
'uni25D4': 9684,
'uni227F': 8831,
'uni0435': 1077,
'uni2302': 8962,
'uni2669': 9833,
'uni24E3': 9443,
'uni2720': 10016,
'uni22A8': 8872,
'uni22A9': 8873,
'uni040A': 1034,
'uni22A7': 8871,
'oe': 339,
'uni040B': 1035,
'uni040E': 1038,
'uni22A3': 8867,
'o': 111,
'uni040F': 1039,
'Edieresis': 203,
'uni25D5': 9685,
'plus': 43,
'uni044D': 1101,
'uni263C': 9788,
'uni22E6': 8934,
'uni2283': 8835,
'uni258C': 9612,
'uni219E': 8606,
'uni24E4': 9444,
'uni2136': 8502,
'dagger': 8224,
'uni24B7': 9399,
'uni219B': 8603,
'uni22E5': 8933,
'three': 51,
'uni210B': 8459,
'uni2534': 9524,
'uni24B8': 9400,
'uni230A': 8970,
'hungarumlaut': 733,
'parenleft': 40,
'uni0148': 328,
'uni0149': 329,
'uni2124': 8484,
'uni2125': 8485,
'uni2126': 8486,
'uni2127': 8487,
'uni0140': 320,
'uni2129': 8489,
'uni25C5': 9669,
'uni0143': 323,
'uni0144': 324,
'uni0145': 325,
'uni0146': 326,
'uni0147': 327,
'uni210D': 8461,
'fraction': 8260,
'uni2031': 8241,
'uni2196': 8598,
'uni2035': 8245,
'uni24E6': 9446,
'uni016B': 363,
'uni24BA': 9402,
'uni266A': 9834,
'uni0116': 278,
'uni2115': 8469,
'registered': 174,
'J': 74,
'uni25DF': 9695,
'uni25CE': 9678,
'uni273D': 10045,
'dieresis': 168,
'uni212B': 8491,
'uni0114': 276,
'uni212D': 8493,
'uni212E': 8494,
'uni212F': 8495,
'uni014A': 330,
'uni014B': 331,
'uni014C': 332,
'uni014D': 333,
'uni014E': 334,
'uni014F': 335,
'uni025E': 606,
'uni24E8': 9448,
'uni0111': 273,
'uni24E9': 9449,
'Ograve': 210,
'j': 106,
'uni2195': 8597,
'uni2194': 8596,
'uni2197': 8599,
'uni2037': 8247,
'uni2191': 8593,
'uni2190': 8592,
'uni2193': 8595,
'uni2192': 8594,
'uni29FA': 10746,
'uni2713': 10003,
'z': 122,
'uni2199': 8601,
'uni2198': 8600,
'uni2667': 9831,
'ae': 230,
'uni0448': 1096,
'semicolon': 59,
'uni2666': 9830,
'uni038F': 911,
'uni0444': 1092,
'uni0447': 1095,
'uni038E': 910,
'uni0441': 1089,
'uni038C': 908,
'uni0443': 1091,
'uni038A': 906,
'uni0250': 592,
'uni0251': 593,
'uni0252': 594,
'uni0253': 595,
'uni0254': 596,
'at': 64,
'uni0256': 598,
'uni0257': 599,
'uni0167': 359,
'uni0259': 601,
'uni228C': 8844,
'uni2662': 9826,
'uni0319': 793,
'uni0318': 792,
'uni24BC': 9404,
'uni0402': 1026,
'uni22EF': 8943,
'Iacute': 205,
'uni22ED': 8941,
'uni22EE': 8942,
'uni0311': 785,
'uni0310': 784,
'uni21E8': 8680,
'uni0312': 786,
'percent': 37,
'uni0317': 791,
'uni0316': 790,
'uni21D6': 8662,
'uni21D7': 8663,
'uni21D4': 8660,
'uni21D5': 8661,
'uni21D2': 8658,
'uni21D3': 8659,
'uni21D0': 8656,
'uni2138': 8504,
'uni2270': 8816,
'uni2271': 8817,
'uni2272': 8818,
'uni2273': 8819,
'uni2274': 8820,
'uni2275': 8821,
'bracketright': 93,
'uni21D9': 8665,
'uni21DF': 8671,
'uni21DD': 8669,
'uni21DE': 8670,
'AE': 198,
'uni03AE': 942,
'uni227A': 8826,
'uni227B': 8827,
'uni227C': 8828,
'asterisk': 42,
'aacute': 225,
'uni226F': 8815,
'uni22E2': 8930,
'uni0386': 902,
'uni22E0': 8928,
'uni22E1': 8929,
'U': 85,
'uni22E7': 8935,
'uni22E4': 8932,
'uni0387': 903,
'uni031A': 794,
'eacute': 233,
'uni22E8': 8936,
'uni22E9': 8937,
'uni24D8': 9432,
'uni025A': 602,
'uni025B': 603,
'uni025C': 604,
'e': 101,
'uni0128': 296,
'uni025F': 607,
'uni2665': 9829,
'thorn': 254,
'uni0129': 297,
'uni253C': 9532,
'uni25D7': 9687,
'u': 117,
'uni0388': 904,
'uni0389': 905,
'uni0255': 597,
'uni0171': 369,
'uni0384': 900,
'uni0385': 901,
'uni044A': 1098,
'uni252C': 9516,
'uni044C': 1100,
'uni044B': 1099}
uni2type1 = dict([(v,k) for k,v in type12uni.items()])
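# Illustrative usage note (not part of the original table): `type12uni` maps
# Type 1 glyph names to Unicode code points and `uni2type1` is its inverse,
# e.g. type12uni['copyright'] == 169 and uni2type1[169] == 'copyright'.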
tex2uni = {
'widehat': 0x0302,
'widetilde': 0x0303,
'langle': 0x27e8,
'rangle': 0x27e9,
'perp': 0x27c2,
'neq': 0x2260,
'Join': 0x2a1d,
'leqslant': 0x2a7d,
'geqslant': 0x2a7e,
'lessapprox': 0x2a85,
'gtrapprox': 0x2a86,
'lesseqqgtr': 0x2a8b,
'gtreqqless': 0x2a8c,
'triangleeq': 0x225c,
'eqslantless': 0x2a95,
'eqslantgtr': 0x2a96,
'backepsilon': 0x03f6,
'precapprox': 0x2ab7,
'succapprox': 0x2ab8,
'fallingdotseq': 0x2252,
'subseteqq': 0x2ac5,
'supseteqq': 0x2ac6,
'varpropto': 0x221d,
'precnapprox': 0x2ab9,
'succnapprox': 0x2aba,
'subsetneqq': 0x2acb,
'supsetneqq': 0x2acc,
'lnapprox': 0x2ab9,
'gnapprox': 0x2aba,
'longleftarrow': 0x27f5,
'longrightarrow': 0x27f6,
'longleftrightarrow': 0x27f7,
'Longleftarrow': 0x27f8,
'Longrightarrow': 0x27f9,
'Longleftrightarrow': 0x27fa,
'longmapsto': 0x27fc,
'leadsto': 0x21dd,
'dashleftarrow': 0x290e,
'dashrightarrow': 0x290f,
'circlearrowleft': 0x21ba,
'circlearrowright': 0x21bb,
'leftrightsquigarrow': 0x21ad,
'leftsquigarrow': 0x219c,
'rightsquigarrow': 0x219d,
'Game': 0x2141,
'hbar': 0x0127,
'hslash': 0x210f,
'ldots': 0x22ef,
'vdots': 0x22ee,
'doteqdot': 0x2251,
'doteq': 8784,
'partial': 8706,
'gg': 8811,
'asymp': 8781,
'blacktriangledown': 9662,
'otimes': 8855,
'nearrow': 8599,
'varpi': 982,
'vee': 8744,
'vec': 8407,
'smile': 8995,
'succnsim': 8937,
'gimel': 8503,
'vert': 124,
'|': 124,
'varrho': 1009,
'P': 182,
'approxident': 8779,
'Swarrow': 8665,
'textasciicircum': 94,
'imageof': 8887,
'ntriangleleft': 8938,
'nleq': 8816,
'div': 247,
'nparallel': 8742,
'Leftarrow': 8656,
'lll': 8920,
'oiint': 8751,
'ngeq': 8817,
'Theta': 920,
'origof': 8886,
'blacksquare': 9632,
'solbar': 9023,
'neg': 172,
'sum': 8721,
'Vdash': 8873,
'coloneq': 8788,
'degree': 176,
'bowtie': 8904,
'blacktriangleright': 9654,
'varsigma': 962,
'leq': 8804,
'ggg': 8921,
'lneqq': 8808,
'scurel': 8881,
'stareq': 8795,
'BbbN': 8469,
'nLeftarrow': 8653,
'nLeftrightarrow': 8654,
'k': 808,
'bot': 8869,
'BbbC': 8450,
'Lsh': 8624,
'leftleftarrows': 8647,
'BbbZ': 8484,
'digamma': 989,
'BbbR': 8477,
'BbbP': 8473,
'BbbQ': 8474,
'vartriangleright': 8883,
'succsim': 8831,
'wedge': 8743,
'lessgtr': 8822,
'veebar': 8891,
'mapsdown': 8615,
'Rsh': 8625,
'chi': 967,
'prec': 8826,
'nsubseteq': 8840,
'therefore': 8756,
'eqcirc': 8790,
'textexclamdown': 161,
'nRightarrow': 8655,
'flat': 9837,
'notin': 8713,
'llcorner': 8990,
'varepsilon': 949,
'bigtriangleup': 9651,
'aleph': 8501,
'dotminus': 8760,
'upsilon': 965,
'Lambda': 923,
'cap': 8745,
'barleftarrow': 8676,
'mu': 956,
'boxplus': 8862,
'mp': 8723,
'circledast': 8859,
'tau': 964,
'in': 8712,
'backslash': 92,
'varnothing': 8709,
'sharp': 9839,
'eqsim': 8770,
'gnsim': 8935,
'Searrow': 8664,
'updownarrows': 8645,
'heartsuit': 9825,
'trianglelefteq': 8884,
'ddag': 8225,
'sqsubseteq': 8849,
'mapsfrom': 8612,
'boxbar': 9707,
'sim': 8764,
'Nwarrow': 8662,
'nequiv': 8802,
'succ': 8827,
'vdash': 8866,
'Leftrightarrow': 8660,
'parallel': 8741,
'invnot': 8976,
'natural': 9838,
'ss': 223,
'uparrow': 8593,
'nsim': 8769,
'hookrightarrow': 8618,
'Equiv': 8803,
'approx': 8776,
'Vvdash': 8874,
'nsucc': 8833,
'leftrightharpoons': 8651,
'Re': 8476,
'boxminus': 8863,
'equiv': 8801,
'Lleftarrow': 8666,
'thinspace': 8201,
'll': 8810,
'Cup': 8915,
'measeq': 8798,
'upharpoonleft': 8639,
'lq': 8216,
'Upsilon': 933,
'subsetneq': 8842,
'greater': 62,
'supsetneq': 8843,
'Cap': 8914,
'L': 321,
'spadesuit': 9824,
'lrcorner': 8991,
'not': 824,
'bar': 772,
'rightharpoonaccent': 8401,
'boxdot': 8865,
'l': 322,
'leftharpoondown': 8637,
'bigcup': 8899,
'iint': 8748,
'bigwedge': 8896,
'downharpoonleft': 8643,
'textasciitilde': 126,
'subset': 8834,
'leqq': 8806,
'mapsup': 8613,
'nvDash': 8877,
'looparrowleft': 8619,
'nless': 8814,
'rightarrowbar': 8677,
'Vert': 8214,
'downdownarrows': 8650,
'uplus': 8846,
'simeq': 8771,
'napprox': 8777,
'ast': 8727,
'twoheaduparrow': 8607,
'doublebarwedge': 8966,
'Sigma': 931,
'leftharpoonaccent': 8400,
'ntrianglelefteq': 8940,
'nexists': 8708,
'times': 215,
'measuredangle': 8737,
'bumpeq': 8783,
'carriagereturn': 8629,
'adots': 8944,
'checkmark': 10003,
'lambda': 955,
'xi': 958,
'rbrace': 125,
'rbrack': 93,
'Nearrow': 8663,
'maltese': 10016,
'clubsuit': 9827,
'top': 8868,
'overarc': 785,
'varphi': 966,
'Delta': 916,
'iota': 953,
'nleftarrow': 8602,
'candra': 784,
'supset': 8835,
'triangleleft': 9665,
'gtreqless': 8923,
'ntrianglerighteq': 8941,
'quad': 8195,
'Xi': 926,
'gtrdot': 8919,
'leftthreetimes': 8907,
'minus': 8722,
'preccurlyeq': 8828,
'nleftrightarrow': 8622,
'lambdabar': 411,
'blacktriangle': 9652,
'kernelcontraction': 8763,
'Phi': 934,
'angle': 8736,
'spadesuitopen': 9828,
'eqless': 8924,
'mid': 8739,
'varkappa': 1008,
'Ldsh': 8626,
'updownarrow': 8597,
'beta': 946,
'textquotedblleft': 8220,
'rho': 961,
'alpha': 945,
'intercal': 8890,
'beth': 8502,
'grave': 768,
'acwopencirclearrow': 8634,
'nmid': 8740,
'nsupset': 8837,
'sigma': 963,
'dot': 775,
'Rightarrow': 8658,
'turnednot': 8985,
'backsimeq': 8909,
'leftarrowtail': 8610,
'approxeq': 8778,
'curlyeqsucc': 8927,
'rightarrowtail': 8611,
'Psi': 936,
'copyright': 169,
'yen': 165,
'vartriangleleft': 8882,
'rasp': 700,
'triangleright': 9655,
'precsim': 8830,
'infty': 8734,
'geq': 8805,
'updownarrowbar': 8616,
'precnsim': 8936,
'H': 779,
'ulcorner': 8988,
'looparrowright': 8620,
'ncong': 8775,
'downarrow': 8595,
'circeq': 8791,
'subseteq': 8838,
'bigstar': 9733,
'prime': 8242,
'lceil': 8968,
'Rrightarrow': 8667,
'oiiint': 8752,
'curlywedge': 8911,
'vDash': 8872,
'lfloor': 8970,
'ddots': 8945,
'exists': 8707,
'underbar': 817,
'Pi': 928,
'leftrightarrows': 8646,
'sphericalangle': 8738,
'coprod': 8720,
'circledcirc': 8858,
'gtrsim': 8819,
'gneqq': 8809,
'between': 8812,
'theta': 952,
'complement': 8705,
'arceq': 8792,
'nVdash': 8878,
'S': 167,
'wr': 8768,
'wp': 8472,
'backcong': 8780,
'lasp': 701,
'c': 807,
'nabla': 8711,
'dotplus': 8724,
'eta': 951,
'forall': 8704,
'eth': 240,
'colon': 58,
'sqcup': 8852,
'rightrightarrows': 8649,
'sqsupset': 8848,
'mapsto': 8614,
'bigtriangledown': 9661,
'sqsupseteq': 8850,
'propto': 8733,
'pi': 960,
'pm': 177,
'dots': 8230,
'nrightarrow': 8603,
'textasciiacute': 180,
'Doteq': 8785,
'breve': 774,
'sqcap': 8851,
'twoheadrightarrow': 8608,
'kappa': 954,
'vartriangle': 9653,
'diamondsuit': 9826,
'pitchfork': 8916,
'blacktriangleleft': 9664,
'nprec': 8832,
'vdots': 8942,
'curvearrowright': 8631,
'barwedge': 8892,
'multimap': 8888,
'textquestiondown': 191,
'cong': 8773,
'rtimes': 8906,
'rightzigzagarrow': 8669,
'rightarrow': 8594,
'leftarrow': 8592,
'__sqrt__': 8730,
'twoheaddownarrow': 8609,
'oint': 8750,
'bigvee': 8897,
'eqdef': 8797,
'sterling': 163,
'phi': 981,
'Updownarrow': 8661,
'backprime': 8245,
'emdash': 8212,
'Gamma': 915,
'i': 305,
'rceil': 8969,
'leftharpoonup': 8636,
'Im': 8465,
'curvearrowleft': 8630,
'wedgeq': 8793,
'fallingdotseq': 8786,
'curlyeqprec': 8926,
'questeq': 8799,
'less': 60,
'upuparrows': 8648,
'tilde': 771,
'textasciigrave': 96,
'smallsetminus': 8726,
'ell': 8467,
'cup': 8746,
'danger': 9761,
'nVDash': 8879,
'cdotp': 183,
'cdots': 8943,
'hat': 770,
'eqgtr': 8925,
'enspace': 8194,
'psi': 968,
'frown': 8994,
'acute': 769,
'downzigzagarrow': 8623,
'ntriangleright': 8939,
'cupdot': 8845,
'circleddash': 8861,
'oslash': 8856,
'mho': 8487,
'd': 803,
'sqsubset': 8847,
'cdot': 8901,
'Omega': 937,
'OE': 338,
'veeeq': 8794,
'Finv': 8498,
't': 865,
'leftrightarrow': 8596,
'swarrow': 8601,
'rightthreetimes': 8908,
'rightleftharpoons': 8652,
'lesssim': 8818,
'searrow': 8600,
'because': 8757,
'gtrless': 8823,
'star': 8902,
'nsubset': 8836,
'zeta': 950,
'dddot': 8411,
'bigcirc': 9675,
'Supset': 8913,
'circ': 8728,
'slash': 8725,
'ocirc': 778,
'prod': 8719,
'twoheadleftarrow': 8606,
'daleth': 8504,
'upharpoonright': 8638,
'odot': 8857,
'Uparrow': 8657,
'O': 216,
'hookleftarrow': 8617,
'trianglerighteq': 8885,
'nsime': 8772,
'oe': 339,
'nwarrow': 8598,
'o': 248,
'ddddot': 8412,
'downharpoonright': 8642,
'succcurlyeq': 8829,
'gamma': 947,
'scrR': 8475,
'dag': 8224,
'thickspace': 8197,
'frakZ': 8488,
'lessdot': 8918,
'triangledown': 9663,
'ltimes': 8905,
'scrB': 8492,
'endash': 8211,
'scrE': 8496,
'scrF': 8497,
'scrH': 8459,
'scrI': 8464,
'rightharpoondown': 8641,
'scrL': 8466,
'scrM': 8499,
'frakC': 8493,
'nsupseteq': 8841,
'circledR': 174,
'circledS': 9416,
'ngtr': 8815,
'bigcap': 8898,
'scre': 8495,
'Downarrow': 8659,
'scrg': 8458,
'overleftrightarrow': 8417,
'scro': 8500,
'lnsim': 8934,
'eqcolon': 8789,
'curlyvee': 8910,
'urcorner': 8989,
'lbrace': 123,
'Bumpeq': 8782,
'delta': 948,
'boxtimes': 8864,
'overleftarrow': 8406,
'prurel': 8880,
'clubsuitopen': 9831,
'cwopencirclearrow': 8635,
'geqq': 8807,
'rightleftarrows': 8644,
'ac': 8766,
'ae': 230,
'int': 8747,
'rfloor': 8971,
'risingdotseq': 8787,
'nvdash': 8876,
'diamond': 8900,
'ddot': 776,
'backsim': 8765,
'oplus': 8853,
'triangleq': 8796,
'check': 780,
'ni': 8715,
'iiint': 8749,
'ne': 8800,
'lesseqgtr': 8922,
'obar': 9021,
'supseteq': 8839,
'nu': 957,
'AA': 8491,
'AE': 198,
'models': 8871,
'ominus': 8854,
'dashv': 8867,
'omega': 969,
'rq': 8217,
'Subset': 8912,
'rightharpoonup': 8640,
'Rdsh': 8627,
'bullet': 8729,
'divideontimes': 8903,
'lbrack': 91,
'textquotedblright': 8221,
'Colon': 8759,
'%': 37,
'$': 36,
'{': 123,
'}': 125,
'_': 95,
'imath': 0x131,
'circumflexaccent' : 770,
'combiningbreve' : 774,
'combiningoverline' : 772,
'combininggraveaccent' : 768,
'combiningacuteaccent' : 769,
'combiningdiaeresis' : 776,
'combiningtilde' : 771,
'combiningrightarrowabove' : 8407,
'combiningdotabove' : 775,
'to': 8594,
'succeq': 8829,
'emptyset': 8709,
'leftparen': 40,
'rightparen': 41,
'bigoplus': 10753,
'leftangle': 10216,
'rightangle': 10217,
'leftbrace': 124,
'rightbrace': 125,
'jmath': 567,
'bigodot': 10752,
'preceq': 8828,
'biguplus': 10756,
'epsilon': 949,
'vartheta': 977,
'bigotimes': 10754
}
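# Illustrative usage note (not part of the original table): `tex2uni` maps TeX
# macro names (without the backslash) to Unicode code points, e.g.
# tex2uni['alpha'] == 0x3b1 and tex2uni['leftarrow'] == 0x2190.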
# Each element is a 4-tuple of the form:
# src_start, src_end, dst_font, dst_start
#   (a small lookup sketch for these tuples follows the table below)
stix_virtual_fonts = {
'bb':
{
'rm':
[
(0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
(0x0041, 0x0042, 'rm', 0x1d538), # A-B
(0x0043, 0x0043, 'rm', 0x2102), # C
(0x0044, 0x0047, 'rm', 0x1d53b), # D-G
(0x0048, 0x0048, 'rm', 0x210d), # H
(0x0049, 0x004d, 'rm', 0x1d540), # I-M
(0x004e, 0x004e, 'rm', 0x2115), # N
(0x004f, 0x004f, 'rm', 0x1d546), # O
(0x0050, 0x0051, 'rm', 0x2119), # P-Q
(0x0052, 0x0052, 'rm', 0x211d), # R
(0x0053, 0x0059, 'rm', 0x1d54a), # S-Y
(0x005a, 0x005a, 'rm', 0x2124), # Z
(0x0061, 0x007a, 'rm', 0x1d552), # a-z
(0x0393, 0x0393, 'rm', 0x213e), # \Gamma
(0x03a0, 0x03a0, 'rm', 0x213f), # \Pi
(0x03a3, 0x03a3, 'rm', 0x2140), # \Sigma
(0x03b3, 0x03b3, 'rm', 0x213d), # \gamma
(0x03c0, 0x03c0, 'rm', 0x213c), # \pi
],
'it':
[
(0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
(0x0041, 0x0042, 'it', 0xe154), # A-B
(0x0043, 0x0043, 'it', 0x2102), # C (missing in beta STIX fonts)
(0x0044, 0x0044, 'it', 0x2145), # D
(0x0045, 0x0047, 'it', 0xe156), # E-G
(0x0048, 0x0048, 'it', 0x210d), # H (missing in beta STIX fonts)
(0x0049, 0x004d, 'it', 0xe159), # I-M
(0x004e, 0x004e, 'it', 0x2115), # N (missing in beta STIX fonts)
(0x004f, 0x004f, 'it', 0xe15e), # O
(0x0050, 0x0051, 'it', 0x2119), # P-Q (missing in beta STIX fonts)
(0x0052, 0x0052, 'it', 0x211d), # R (missing in beta STIX fonts)
(0x0053, 0x0059, 'it', 0xe15f), # S-Y
(0x005a, 0x005a, 'it', 0x2124), # Z (missing in beta STIX fonts)
(0x0061, 0x0063, 'it', 0xe166), # a-c
(0x0064, 0x0065, 'it', 0x2146), # d-e
(0x0066, 0x0068, 'it', 0xe169), # f-h
(0x0069, 0x006a, 'it', 0x2148), # i-j
(0x006b, 0x007a, 'it', 0xe16c), # k-z
(0x0393, 0x0393, 'it', 0x213e), # \Gamma (missing in beta STIX fonts)
(0x03a0, 0x03a0, 'it', 0x213f), # \Pi
(0x03a3, 0x03a3, 'it', 0x2140), # \Sigma (missing in beta STIX fonts)
(0x03b3, 0x03b3, 'it', 0x213d), # \gamma (missing in beta STIX fonts)
(0x03c0, 0x03c0, 'it', 0x213c), # \pi
],
'bf':
[
(0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
(0x0041, 0x005a, 'bf', 0xe38a), # A-Z
(0x0061, 0x007a, 'bf', 0xe39d), # a-z
(0x0393, 0x0393, 'bf', 0x213e), # \Gamma
(0x03a0, 0x03a0, 'bf', 0x213f), # \Pi
(0x03a3, 0x03a3, 'bf', 0x2140), # \Sigma
(0x03b3, 0x03b3, 'bf', 0x213d), # \gamma
(0x03c0, 0x03c0, 'bf', 0x213c), # \pi
],
},
'cal':
[
(0x0041, 0x005a, 'it', 0xe22d), # A-Z
],
'circled':
{
'rm':
[
(0x0030, 0x0030, 'rm', 0x24ea), # 0
(0x0031, 0x0039, 'rm', 0x2460), # 1-9
(0x0041, 0x005a, 'rm', 0x24b6), # A-Z
(0x0061, 0x007a, 'rm', 0x24d0) # a-z
],
'it':
[
(0x0030, 0x0030, 'rm', 0x24ea), # 0
(0x0031, 0x0039, 'rm', 0x2460), # 1-9
(0x0041, 0x005a, 'it', 0x24b6), # A-Z
(0x0061, 0x007a, 'it', 0x24d0) # a-z
],
'bf':
[
(0x0030, 0x0030, 'bf', 0x24ea), # 0
(0x0031, 0x0039, 'bf', 0x2460), # 1-9
(0x0041, 0x005a, 'bf', 0x24b6), # A-Z
(0x0061, 0x007a, 'bf', 0x24d0) # a-z
],
},
'frak':
{
'rm':
[
(0x0041, 0x0042, 'rm', 0x1d504), # A-B
(0x0043, 0x0043, 'rm', 0x212d), # C
(0x0044, 0x0047, 'rm', 0x1d507), # D-G
(0x0048, 0x0048, 'rm', 0x210c), # H
(0x0049, 0x0049, 'rm', 0x2111), # I
(0x004a, 0x0051, 'rm', 0x1d50d), # J-Q
(0x0052, 0x0052, 'rm', 0x211c), # R
(0x0053, 0x0059, 'rm', 0x1d516), # S-Y
(0x005a, 0x005a, 'rm', 0x2128), # Z
(0x0061, 0x007a, 'rm', 0x1d51e), # a-z
],
'it':
[
(0x0041, 0x0042, 'rm', 0x1d504), # A-B
(0x0043, 0x0043, 'rm', 0x212d), # C
(0x0044, 0x0047, 'rm', 0x1d507), # D-G
(0x0048, 0x0048, 'rm', 0x210c), # H
(0x0049, 0x0049, 'rm', 0x2111), # I
(0x004a, 0x0051, 'rm', 0x1d50d), # J-Q
(0x0052, 0x0052, 'rm', 0x211c), # R
(0x0053, 0x0059, 'rm', 0x1d516), # S-Y
(0x005a, 0x005a, 'rm', 0x2128), # Z
(0x0061, 0x007a, 'rm', 0x1d51e), # a-z
],
'bf':
[
(0x0041, 0x005a, 'bf', 0x1d56c), # A-Z
(0x0061, 0x007a, 'bf', 0x1d586), # a-z
],
},
'scr':
[
(0x0041, 0x0041, 'it', 0x1d49c), # A
(0x0042, 0x0042, 'it', 0x212c), # B
(0x0043, 0x0044, 'it', 0x1d49e), # C-D
(0x0045, 0x0046, 'it', 0x2130), # E-F
(0x0047, 0x0047, 'it', 0x1d4a2), # G
(0x0048, 0x0048, 'it', 0x210b), # H
(0x0049, 0x0049, 'it', 0x2110), # I
(0x004a, 0x004b, 'it', 0x1d4a5), # J-K
(0x004c, 0x004c, 'it', 0x2112), # L
        (0x004d, 0x004d, 'it', 0x2133), # M
(0x004e, 0x0051, 'it', 0x1d4a9), # N-Q
(0x0052, 0x0052, 'it', 0x211b), # R
(0x0053, 0x005a, 'it', 0x1d4ae), # S-Z
(0x0061, 0x0064, 'it', 0x1d4b6), # a-d
(0x0065, 0x0065, 'it', 0x212f), # e
(0x0066, 0x0066, 'it', 0x1d4bb), # f
(0x0067, 0x0067, 'it', 0x210a), # g
(0x0068, 0x006e, 'it', 0x1d4bd), # h-n
(0x006f, 0x006f, 'it', 0x2134), # o
(0x0070, 0x007a, 'it', 0x1d4c5), # p-z
],
'sf':
{
'rm':
[
(0x0030, 0x0039, 'rm', 0x1d7e2), # 0-9
(0x0041, 0x005a, 'rm', 0x1d5a0), # A-Z
(0x0061, 0x007a, 'rm', 0x1d5ba), # a-z
(0x0391, 0x03a9, 'rm', 0xe17d), # \Alpha-\Omega
(0x03b1, 0x03c9, 'rm', 0xe196), # \alpha-\omega
(0x03d1, 0x03d1, 'rm', 0xe1b0), # theta variant
(0x03d5, 0x03d5, 'rm', 0xe1b1), # phi variant
(0x03d6, 0x03d6, 'rm', 0xe1b3), # pi variant
(0x03f1, 0x03f1, 'rm', 0xe1b2), # rho variant
(0x03f5, 0x03f5, 'rm', 0xe1af), # lunate epsilon
(0x2202, 0x2202, 'rm', 0xe17c), # partial differential
],
'it':
[
# These numerals are actually upright. We don't actually
# want italic numerals ever.
(0x0030, 0x0039, 'rm', 0x1d7e2), # 0-9
(0x0041, 0x005a, 'it', 0x1d608), # A-Z
(0x0061, 0x007a, 'it', 0x1d622), # a-z
(0x0391, 0x03a9, 'rm', 0xe17d), # \Alpha-\Omega
(0x03b1, 0x03c9, 'it', 0xe1d8), # \alpha-\omega
(0x03d1, 0x03d1, 'it', 0xe1f2), # theta variant
(0x03d5, 0x03d5, 'it', 0xe1f3), # phi variant
(0x03d6, 0x03d6, 'it', 0xe1f5), # pi variant
(0x03f1, 0x03f1, 'it', 0xe1f4), # rho variant
(0x03f5, 0x03f5, 'it', 0xe1f1), # lunate epsilon
],
'bf':
[
(0x0030, 0x0039, 'bf', 0x1d7ec), # 0-9
(0x0041, 0x005a, 'bf', 0x1d5d4), # A-Z
(0x0061, 0x007a, 'bf', 0x1d5ee), # a-z
(0x0391, 0x03a9, 'bf', 0x1d756), # \Alpha-\Omega
(0x03b1, 0x03c9, 'bf', 0x1d770), # \alpha-\omega
(0x03d1, 0x03d1, 'bf', 0x1d78b), # theta variant
(0x03d5, 0x03d5, 'bf', 0x1d78d), # phi variant
(0x03d6, 0x03d6, 'bf', 0x1d78f), # pi variant
(0x03f0, 0x03f0, 'bf', 0x1d78c), # kappa variant
(0x03f1, 0x03f1, 'bf', 0x1d78e), # rho variant
(0x03f5, 0x03f5, 'bf', 0x1d78a), # lunate epsilon
(0x2202, 0x2202, 'bf', 0x1d789), # partial differential
(0x2207, 0x2207, 'bf', 0x1d76f), # \Nabla
],
},
'tt':
[
(0x0030, 0x0039, 'rm', 0x1d7f6), # 0-9
(0x0041, 0x005a, 'rm', 0x1d670), # A-Z
(0x0061, 0x007a, 'rm', 0x1d68a) # a-z
],
}
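# Hedged lookup sketch (not part of the original module; the helper name
# `_remap_char` is an assumption for illustration). Each 4-tuple
# (src_start, src_end, dst_font, dst_start) remaps a contiguous range of
# source code points onto a destination font:
def _remap_char(mapping, codepoint):
    """Return (dst_font, dst_codepoint) for `codepoint`, or None if unmapped."""
    for src_start, src_end, dst_font, dst_start in mapping:
        if src_start <= codepoint <= src_end:
            return dst_font, dst_start + (codepoint - src_start)
    return None
# Example: _remap_char(stix_virtual_fonts['bb']['rm'], 0x4e) == ('rm', 0x2115)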
|
gpl-3.0
|
plissonf/scikit-learn
|
examples/neighbors/plot_approximate_nearest_neighbors_scalability.py
|
225
|
5719
|
"""
============================================
Scalability of Approximate Nearest Neighbors
============================================
This example studies the scalability profile of approximate 10-neighbors
queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200``
when varying the number of samples in the dataset.
The first plot demonstrates the relationship between query time and index size
of LSHForest. Query time is compared with the brute force method in exact
nearest neighbor search for the same index sizes. The brute force queries have
a very predictable linear scalability with the index size (full scan).
LSHForest indexes have a sub-linear scalability profile but can be slower for
small datasets.
The second plot shows the speedup when using approximate queries vs brute force
exact queries. The speedup tends to increase with the dataset size but should
reach a plateau typically when doing queries on datasets with millions of
samples and a few hundred dimensions. Higher-dimensional datasets tend to
benefit more from LSHForest indexing.
The break-even point (speedup = 1) depends on the dimensionality and structure
of the indexed data and the parameters of the LSHForest index.
The precision of approximate queries should decrease slowly with the dataset
size. The rate of this decrease depends mostly on the LSHForest parameters and
the dimensionality of the data.
"""
from __future__ import division
print(__doc__)
# Authors: Maheshakya Wijewardena <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import time
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Parameters of the study
n_samples_min = int(1e3)
n_samples_max = int(1e5)
n_features = 100
n_centers = 100
n_queries = 100
n_steps = 6
n_iter = 5
# Initialize the range of `n_samples`
n_samples_values = np.logspace(np.log10(n_samples_min),
np.log10(n_samples_max),
n_steps).astype(np.int)
# Generate some structured data
rng = np.random.RandomState(42)
all_data, _ = make_blobs(n_samples=n_samples_max + n_queries,
n_features=n_features, centers=n_centers, shuffle=True,
random_state=0)
queries = all_data[:n_queries]
index_data = all_data[n_queries:]
# Metrics to collect for the plots
average_times_exact = []
average_times_approx = []
std_times_approx = []
accuracies = []
std_accuracies = []
average_speedups = []
std_speedups = []
# Calculate the average query time
for n_samples in n_samples_values:
X = index_data[:n_samples]
    # Initialize LSHForest and a brute-force NearestNeighbors baseline for 10-neighbors queries
lshf = LSHForest(n_estimators=20, n_candidates=200,
n_neighbors=10).fit(X)
nbrs = NearestNeighbors(algorithm='brute', metric='cosine',
n_neighbors=10).fit(X)
time_approx = []
time_exact = []
accuracy = []
for i in range(n_iter):
# pick one query at random to study query time variability in LSHForest
query = queries[rng.randint(0, n_queries)]
t0 = time.time()
exact_neighbors = nbrs.kneighbors(query, return_distance=False)
time_exact.append(time.time() - t0)
t0 = time.time()
approx_neighbors = lshf.kneighbors(query, return_distance=False)
time_approx.append(time.time() - t0)
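        # precision@10: fraction of the approximate neighbors that also appear
        # in the exact 10-neighbor set for this query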
accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean())
average_time_exact = np.mean(time_exact)
average_time_approx = np.mean(time_approx)
speedup = np.array(time_exact) / np.array(time_approx)
average_speedup = np.mean(speedup)
mean_accuracy = np.mean(accuracy)
std_accuracy = np.std(accuracy)
print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, "
"accuracy: %0.2f +/-%0.2f" %
(n_samples, average_time_exact, average_time_approx, average_speedup,
mean_accuracy, std_accuracy))
accuracies.append(mean_accuracy)
std_accuracies.append(std_accuracy)
average_times_exact.append(average_time_exact)
average_times_approx.append(average_time_approx)
std_times_approx.append(np.std(time_approx))
average_speedups.append(average_speedup)
std_speedups.append(np.std(speedup))
# Plot average query time against n_samples
plt.figure()
plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx,
fmt='o-', c='r', label='LSHForest')
plt.plot(n_samples_values, average_times_exact, c='b',
label="NearestNeighbors(algorithm='brute', metric='cosine')")
plt.legend(loc='upper left', fontsize='small')
plt.ylim(0, None)
plt.ylabel("Average query time in seconds")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Impact of index size on response time for first "
"nearest neighbors queries")
# Plot average query speedup versus index size
plt.figure()
plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups,
fmt='o-', c='r')
plt.ylim(0, None)
plt.ylabel("Average speedup")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Speedup of the approximate NN queries vs brute force")
# Plot average precision versus index size
plt.figure()
plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c')
plt.ylim(0, 1.1)
plt.ylabel("precision@10")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("precision of 10-nearest-neighbors queries with index size")
plt.show()
|
bsd-3-clause
|
1kastner/analyse_weather_data
|
plot_weather_data/show_husconet_properties.py
|
1
|
1406
|
"""
Depends on filter_weather_data.filters.preparation.average_husconet_temperature
"""
import logging
import os
import pandas
from matplotlib import pyplot
from matplotlib import ticker as mticker
import seaborn
from gather_weather_data.husconet import HUSCONET_STATIONS
from gather_weather_data.husconet import OFFICIAL_HUSCONET_NAME
from filter_weather_data.filters import PROCESSED_DATA_DIR
seaborn.set(style='ticks')
def plot_stations():
"""
Plots all HUSCONET weather stations as boxplots.
"""
plot_df = pandas.DataFrame()
fig = pyplot.figure()
fig.canvas.set_window_title("husconet boxplots")
for husconet_station in HUSCONET_STATIONS:
csv_file = os.path.join(PROCESSED_DATA_DIR, "husconet", husconet_station + ".csv")
logging.debug("loading " + csv_file)
husconet_station_df = pandas.read_csv(csv_file, index_col="datetime", parse_dates=["datetime"])
plot_df[OFFICIAL_HUSCONET_NAME[husconet_station]] = husconet_station_df.temperature
logging.debug("start plotting")
ax = seaborn.boxplot(data=plot_df, width=.5)
    ax.set(xlabel="HUSCONET Station", ylabel="Temperature (°C)")
ax.yaxis.set_major_locator(mticker.MultipleLocator(5)) # draw line every 5 °C
pyplot.grid(color='.8') # a very light gray
pyplot.show()
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
plot_stations()
|
agpl-3.0
|
belenox/dmwp
|
Chap3.py
|
1
|
6064
|
shortlist = [1, 2, 3]
print sum(shortlist)
print len(shortlist)
def calculate_mean(numbers):
s = float(sum(numbers))
N = float(len(numbers))
mean = s/N
return mean
print calculate_mean([1, 2])
if __name__ == '__main__':
donations = [100, 60, 70, 900, 100, 200, 500, 500, 503, 600, 1000, 1200]
print 'Mean donations over the last {0} days is {1}'.format(len(donations), calculate_mean(donations))
samplelist = [4, 1, 3]
samplelist.sort()
print samplelist
def calculate_median(numbers):
N = len(numbers)
numbers.sort()
if N % 2 == 0:
m1 = N/2
m2 = (N/2) + 1
        median = (numbers[int(m1) - 1] + numbers[int(m2) - 1]) / 2.0  # float division so the even-N median is not truncated in Python 2
else:
median = numbers[((N+1) / 2) - 1]
return median
if __name__ == '__main__':
donations = [100, 60, 70, 900, 100, 200, 500, 500, 503, 600, 1000, 1200]
print 'Median donation over the last {0} days is {1}'.format(len(donations), calculate_median(donations))
simplelist = [4, 2, 1, 3, 4]
from collections import Counter
c = Counter(simplelist)
print c.most_common()
print c.most_common(1)
print c.most_common(2)
mode = c.most_common(1)
print mode
print mode[0]
print mode[0][0]
def calculate_mode(numbers):
return Counter(numbers).most_common(1)[0][0]
if __name__ == '__main__':
scores = [7, 8, 9, 2, 10, 9, 9, 9, 9, 4, 5, 6, 1, 5, 6, 7, 8, 6, 1, 10]
mode = calculate_mode(scores)
print 'The mode of the list of numbers is: {0}'.format(mode)
def calculate_mode(numbers):
c = Counter(numbers).most_common()
max_count, modes = c[0][1], []
for num in c:
if num[1] == max_count: modes.append(num[0])
return modes
if __name__ == '__main__':
scores = [5, 5, 5, 4, 4, 4, 9, 1, 3]
modes = calculate_mode(scores)
print 'The mode(s) of the list of numbers are: '
for mode in modes: print mode
def frequency_table(numbers):
table = Counter(numbers)
print 'Number\tFrequency'
for number in table.most_common():
print '{0}\t{1}'.format(number[0], number[1])
if __name__ == '__main__':
scores = [7, 8, 9, 2, 10, 9, 9, 9, 9, 4, 5, 6, 1, 5, 6, 7, 8, 6, 1, 10]
frequency_table(scores)
def frequency_table(numbers):
table = Counter(numbers)
numbers_freq = table.most_common()
numbers_freq.sort()
print('Number\tFrequency')
for number in numbers_freq:
print '{0}\t{1}'.format(number[0], number[1])
if __name__ == '__main__':
scores = [7, 8, 9, 2, 10, 9, 9, 9, 9, 4, 5, 6, 1, 5, 6, 7, 8, 6, 1, 10]
frequency_table(scores)
def find_range(numbers):
low = min(numbers)
high = max(numbers)
r = high - low
return low, high, r
if __name__ == '__main__':
donations = [100, 60, 70, 900, 100, 200, 500, 500, 503, 600, 1000, 1200]
lowest, highest, r = find_range(donations)
print('Lowest: {0} Highest: {1} Range: {2}'.format(lowest, highest, r))
def find_differences(numbers):
mean = calculate_mean(numbers)
diff = []
for num in numbers:
diff.append(num-mean)
return diff
def calculate_variance(numbers):
for x in range(len(numbers)): numbers[x] = float(numbers[x])
diff = find_differences(numbers)
squared_diff = []
for d in diff:
squared_diff.append(d**2)
variance = sum(squared_diff)/len(numbers)
return variance
if __name__ == '__main__':
donations = [100, 60, 70, 900, 100, 200, 500, 500, 503, 600, 1000, 1200]
variance = calculate_variance(donations)
print('The variance of the list of numbers is {0}'.format(variance))
std = variance**0.5
print 'The standard deviation of the list of numbers is {0}'.format(std)
simplelist1 = [1, 2, 3]
simplelist2 = [4, 5, 6]
for x, y in zip(simplelist1, simplelist2):
print x, y
def findcorrxy(x, y):
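    # Pearson correlation coefficient:
    #   r = (n*sum(xy) - sum(x)*sum(y)) /
    #       sqrt((n*sum(x^2) - sum(x)^2) * (n*sum(y^2) - sum(y)^2))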
n = len(x)
prod = []
for xi, yi in zip(x, y):
prod.append(xi*yi)
sumprodxy = sum(prod)
sumx = sum(x)
sumy = sum(y)
squaredsumx = sumx**2
squaredsumy = sumy**2
xsquare = []
for xi in x:
xsquare.append(xi**2)
xsquaresum = sum(xsquare)
ysquare = []
for yi in y:
ysquare.append(yi**2)
ysquaresum = sum(ysquare)
numerator = n*sumprodxy - sumx*sumy
denominatorterm1 = n*xsquaresum - squaredsumx
denominatorterm2 = n*ysquaresum - squaredsumy
denominator = (denominatorterm1 * denominatorterm2)**0.5
correlation = numerator/denominator
return correlation
import matplotlib.pyplot as plt
x = [1, 2, 3, 4]
y = [2, 4, 6, 8]
plt.scatter(x, y)
plt.show()
def sumdata(filename):
s = 0
with open(filename) as f:
for line in f:
s = s + float(line)
print 'Sum of the numbers: {0}'.format(s)
if __name__ == '__main__':
sumdata('mydata.txt')
def read_data(filename):
numbers = []
with open(filename) as f:
for line in f:
numbers.append(float(line))
return numbers
if __name__ == '__main__':
data = read_data('mydata.txt')
mean = calculate_mean(data)
print 'Mean: {0}'.format(mean)
import csv
def scatterplot(x, y):
plt.scatter(x, y)
plt.xlabel('Number')
plt.ylabel('Square')
plt.show()
def read_csv(filename):
numbers = []
squared = []
with open(filename) as f:
reader = csv.reader(f)
next(reader)
for row in reader:
numbers.append(int(row[0]))
squared.append(int(row[1]))
return numbers, squared
if __name__ == '__main__':
numbers, squared = read_csv('numbers.csv')
scatterplot(numbers, squared)
def read_csv(filename):
with open(filename) as f:
reader = csv.reader(f)
next(reader)
summer = []
highest_correlated = []
for row in reader:
summer.append(float(row[0]))
highest_correlated.append(float(row[1]))
return summer, highest_correlated
if __name__ == '__main__':
summer, highest_correlated = read_csv('numbers.csv')
corr = findcorrxy(summer, highest_correlated)
print 'Highest correlation: {0}'.format(corr)
scatterplot(summer, highest_correlated)
|
unlicense
|
bnaul/scikit-learn
|
benchmarks/bench_plot_lasso_path.py
|
12
|
3976
|
"""Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but has a fat infinite tail.
"""
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path, lars_path_gram
from sklearn.linear_model import lasso_path
from sklearn.datasets import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
'n_informative': n_features // 10,
'effective_rank': min(n_samples, n_features) / 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path_gram(Xy=Xy, Gram=G, n_samples=y.size, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 2000, 5).astype(int)
features_range = np.linspace(10, 2000, 5).astype(int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
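        # timings were appended in (n_samples, n_features) grid order; reshape
        # them onto the grid and transpose so the surface lines up with the
        # meshgrid of samples_range (X) and features_range (Y)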
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
# ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
# ax.legend()
i += 1
plt.show()
|
bsd-3-clause
|
dsquareindia/scikit-learn
|
sklearn/manifold/isomap.py
|
39
|
7519
|
"""Isomap for manifold learning"""
# Author: Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
        Stores nearest neighbors instance, including BallTree or KDTree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto', n_jobs=1):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.n_jobs = n_jobs
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_ = NearestNeighbors(n_neighbors=self.n_neighbors,
algorithm=self.neighbors_algorithm,
n_jobs=self.n_jobs)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter,
n_jobs=self.n_jobs)
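        # Build the k-nearest-neighbors graph of the training data and use
        # graph shortest paths as the geodesic distance estimates.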
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance', n_jobs=self.n_jobs)
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
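        # Isomap kernel: K(D) = -0.5 * D ** 2. KernelPCA with a precomputed
        # kernel centers this matrix before the eigendecomposition.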
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
-------
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
# Create the graph of shortest distances from X to self.training_data_
# via the nearest neighbors of X.
# This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min(self.dist_matrix_[indices[i]] +
distances[i][:, None], 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
|
bsd-3-clause
|
iismd17/scikit-learn
|
sklearn/linear_model/least_angle.py
|
61
|
54324
|
"""
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import arrayfuncs, as_float_array, check_X_y
from ..cross_validation import check_cv
from ..utils import ConvergenceWarning
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
solve_triangular_args = {'check_finite': False}
def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
alpha_min=0, method='lar', copy_X=True,
eps=np.finfo(np.float).eps,
copy_Gram=True, verbose=0, return_path=True,
return_n_iter=False, positive=False):
"""Compute Least Angle Regression or Lasso path using LARS algorithm [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lars', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
-----------
X : array, shape: (n_samples, n_features)
Input data.
y : array, shape: (n_samples)
Input targets.
positive : boolean (default=False)
Restrict coefficients to be >= 0.
When using this option together with method 'lasso' the model
coefficients will not converge to the ordinary-least-squares solution
for small values of alpha (neither will they when using method 'lar'
        ..). Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent lasso_path function.
max_iter : integer, optional (default=500)
Maximum number of iterations to perform, set to infinity for no limit.
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features.
alpha_min : float, optional (default=0)
Minimum correlation along the path. It corresponds to the
regularization parameter alpha parameter in the Lasso.
method : {'lar', 'lasso'}, optional (default='lar')
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
eps : float, optional (default=``np.finfo(np.float).eps``)
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : bool, optional (default=True)
If ``False``, ``X`` is overwritten.
copy_Gram : bool, optional (default=True)
If ``False``, ``Gram`` is overwritten.
verbose : int (default=0)
Controls output verbosity.
return_path : bool, optional (default=True)
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, optional (default=False)
Whether to return the number of iterations.
Returns
--------
alphas : array, shape: [n_alphas + 1]
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array, shape [n_alphas]
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See also
--------
lasso_path
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Effron et al.
http://www-stat.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<http://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<http://en.wikipedia.org/wiki/Lasso_(statistics)#Lasso_method>`_
"""
n_features = X.shape[1]
n_samples = y.size
max_features = min(max_iter, n_features)
if return_path:
coefs = np.zeros((max_features + 1, n_features))
alphas = np.zeros(max_features + 1)
else:
coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
alpha, prev_alpha = np.array([0.]), np.array([0.]) # better ideas?
n_iter, n_active = 0, 0
active, indices = list(), np.arange(n_features)
# holds the sign of covariance
sign_active = np.empty(max_features, dtype=np.int8)
drop = False
# will hold the cholesky factorization. Only lower part is
# referenced.
# We are initializing this to "zeros" and not empty, because
# it is passed to scipy linalg functions and thus if it has NaNs,
    # even if they are in the upper part that is not used, we
# get errors raised.
# Once we support only scipy > 0.12 we can use check_finite=False and
# go back to "empty"
L = np.zeros((max_features, max_features), dtype=X.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
solve_cholesky, = get_lapack_funcs(('potrs',), (X,))
if Gram is None:
if copy_X:
# force copy. setting the array to be fortran-ordered
# speeds up the calculation of the (partial) Gram matrix
# and allows to easily swap columns
X = X.copy('F')
elif Gram == 'auto':
Gram = None
if X.shape[0] > X.shape[1]:
Gram = np.dot(X.T, X)
elif copy_Gram:
Gram = Gram.copy()
if Xy is None:
Cov = np.dot(X.T, y)
else:
Cov = Xy.copy()
if verbose:
if verbose > 1:
print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
else:
sys.stdout.write('.')
sys.stdout.flush()
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning
equality_tolerance = np.finfo(np.float32).eps
while True:
if Cov.size:
if positive:
C_idx = np.argmax(Cov)
else:
C_idx = np.argmax(np.abs(Cov))
C_ = Cov[C_idx]
if positive:
C = C_
else:
C = np.fabs(C_)
else:
C = 0.
if return_path:
alpha = alphas[n_iter, np.newaxis]
coef = coefs[n_iter]
prev_alpha = alphas[n_iter - 1, np.newaxis]
prev_coef = coefs[n_iter - 1]
alpha[0] = C / n_samples
if alpha[0] <= alpha_min + equality_tolerance: # early stopping
if abs(alpha[0] - alpha_min) > equality_tolerance:
# interpolation factor 0 <= ss < 1
if n_iter > 0:
# In the first iteration, all alphas are zero, the formula
# below would make ss a NaN
ss = ((prev_alpha[0] - alpha_min) /
(prev_alpha[0] - alpha[0]))
coef[:] = prev_coef + ss * (coef - prev_coef)
alpha[0] = alpha_min
if return_path:
coefs[n_iter] = coef
break
if n_iter >= max_iter or n_active >= n_features:
break
if not drop:
##########################################################
# Append x_j to the Cholesky factorization of (Xa * Xa') #
# #
# ( L 0 ) #
# L -> ( ) , where L * w = Xa' x_j #
# ( w z ) and z = ||x_j|| #
# #
##########################################################
if positive:
sign_active[n_active] = np.ones_like(C_)
else:
sign_active[n_active] = np.sign(C_)
m, n = n_active, C_idx + n_active
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
indices[n], indices[m] = indices[m], indices[n]
Cov_not_shortened = Cov
Cov = Cov[1:] # remove Cov[0]
if Gram is None:
X.T[n], X.T[m] = swap(X.T[n], X.T[m])
c = nrm2(X.T[n_active]) ** 2
L[n_active, :n_active] = \
np.dot(X.T[n_active], X.T[:n_active].T)
else:
# swap does only work inplace if matrix is fortran
# contiguous ...
Gram[m], Gram[n] = swap(Gram[m], Gram[n])
Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
c = Gram[n_active, n_active]
L[n_active, :n_active] = Gram[n_active, :n_active]
# Update the cholesky decomposition for the Gram matrix
if n_active:
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
diag = max(np.sqrt(np.abs(c - v)), eps)
L[n_active, n_active] = diag
if diag < 1e-7:
# The system is becoming too ill-conditioned.
# We have degenerate vectors in our active set.
# We'll 'drop for good' the last regressor added.
# Note: this case is very rare. It is no longer triggered by the
# test suite. The `equality_tolerance` margin added in 0.16.0 to
# get early stopping to work consistently on all versions of
# Python including 32 bit Python under Windows seems to make it
# very difficult to trigger the 'drop for good' strategy.
warnings.warn('Regressors in active set degenerate. '
'Dropping a regressor, after %i iterations, '
'i.e. alpha=%.3e, '
'with an active set of %i regressors, and '
'the smallest cholesky pivot element being %.3e'
% (n_iter, alpha, n_active, diag),
ConvergenceWarning)
# XXX: need to figure a 'drop for good' way
Cov = Cov_not_shortened
Cov[0] = 0
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
continue
active.append(indices[n_active])
n_active += 1
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
n_active, C))
if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
# alpha is increasing. This is because the updates of Cov are
# bringing in too much numerical error that is greater than
            # the remaining correlation with the regressors. Time to bail out
warnings.warn('Early stopping the lars path, as the residues '
'are small and the current value of alpha is no '
'longer well controlled. %i iterations, alpha=%.3e, '
'previous alpha=%.3e, with an active set of %i '
'regressors.'
% (n_iter, alpha, prev_alpha, n_active),
ConvergenceWarning)
break
# least squares solution
least_squares, info = solve_cholesky(L[:n_active, :n_active],
sign_active[:n_active],
lower=True)
if least_squares.size == 1 and least_squares == 0:
# This happens because sign_active[:n_active] = 0
least_squares[...] = 1
AA = 1.
else:
# is this really needed ?
AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
if not np.isfinite(AA):
# L is too ill-conditioned
i = 0
L_ = L[:n_active, :n_active].copy()
while not np.isfinite(AA):
L_.flat[::n_active + 1] += (2 ** i) * eps
least_squares, info = solve_cholesky(
L_, sign_active[:n_active], lower=True)
tmp = max(np.sum(least_squares * sign_active[:n_active]),
eps)
AA = 1. / np.sqrt(tmp)
i += 1
least_squares *= AA
if Gram is None:
# equiangular direction of variables in the active set
eq_dir = np.dot(X.T[:n_active].T, least_squares)
            # correlation between each inactive variable and the
            # equiangular vector
corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
else:
# if huge number of features, this takes 50% of time, I
# think could be avoided if we just update it using an
# orthogonal (QR) decomposition of X
corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
least_squares)
g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny))
if positive:
gamma_ = min(g1, C / AA)
else:
g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny))
gamma_ = min(g1, g2, C / AA)
# TODO: better names for these variables: z
drop = False
z = -coef[active] / (least_squares + tiny32)
z_pos = arrayfuncs.min_pos(z)
if z_pos < gamma_:
# some coefficients have changed sign
idx = np.where(z == z_pos)[0][::-1]
# update the sign, important for LAR
sign_active[idx] = -sign_active[idx]
if method == 'lasso':
gamma_ = z_pos
drop = True
n_iter += 1
if return_path:
if n_iter >= coefs.shape[0]:
del coef, alpha, prev_alpha, prev_coef
# resize the coefs and alphas array
add_features = 2 * max(1, (max_features - n_active))
coefs = np.resize(coefs, (n_iter + add_features, n_features))
alphas = np.resize(alphas, n_iter + add_features)
coef = coefs[n_iter]
prev_coef = coefs[n_iter - 1]
alpha = alphas[n_iter, np.newaxis]
prev_alpha = alphas[n_iter - 1, np.newaxis]
else:
# mimic the effect of incrementing n_iter on the array references
prev_coef = coef
prev_alpha[0] = alpha[0]
coef = np.zeros_like(coef)
coef[active] = prev_coef[active] + gamma_ * least_squares
# update correlations
Cov -= gamma_ * corr_eq_dir
# See if any coefficient has changed sign
if drop and method == 'lasso':
            # handle the case when idx is not of length 1
[arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in
idx]
n_active -= 1
m, n = idx, n_active
            # handle the case when idx is not of length 1
drop_idx = [active.pop(ii) for ii in idx]
if Gram is None:
# propagate dropped variable
for ii in idx:
for i in range(ii, n_active):
X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
# yeah this is stupid
indices[i], indices[i + 1] = indices[i + 1], indices[i]
# TODO: this could be updated
residual = y - np.dot(X[:, :n_active], coef[active])
temp = np.dot(X.T[n_active], residual)
Cov = np.r_[temp, Cov]
else:
for ii in idx:
for i in range(ii, n_active):
indices[i], indices[i + 1] = indices[i + 1], indices[i]
Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
Gram[:, i + 1])
                # Cov_n = Cov_j + x_j * X + increment(betas)
                # TODO: will this still work with multiple drops?
                # recompute covariance; could probably be done better.
                # Note: this is not exact, as Xy is not swapped with the rest
                # of the variables.
# TODO: this could be updated
residual = y - np.dot(X, coef)
temp = np.dot(X.T[drop_idx], residual)
Cov = np.r_[temp, Cov]
sign_active = np.delete(sign_active, idx)
sign_active = np.append(sign_active, 0.) # just to maintain size
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
n_active, abs(temp)))
if return_path:
# resize coefs in case of early stop
alphas = alphas[:n_iter + 1]
coefs = coefs[:n_iter + 1]
if return_n_iter:
return alphas, active, coefs.T, n_iter
else:
return alphas, active, coefs.T
else:
if return_n_iter:
return alpha, active, coef, n_iter
else:
return alpha, active, coef
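# --- Illustrative usage sketch (not part of the original scikit-learn module).
# A minimal call to lars_path on a tiny synthetic problem; the helper name
# `_demo_lars_path` is hypothetical and only added for illustration.
def _demo_lars_path():
    """Run lars_path on a small random design and inspect the path shapes."""
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    y = X[:, 0] - 2.0 * X[:, 2] + 0.01 * rng.randn(20)
    alphas, active, coefs = lars_path(X, y, method='lasso')
    # `coefs` has one column per knot of the piecewise-linear coefficient path
    return alphas.shape, active, coefs.shape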
###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
"""Least Angle Regression model a.k.a. LAR
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
n_nonzero_coefs : int, optional
Target number of non-zero coefficients. Use ``np.inf`` for no limit.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If True the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
whichever is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) \
| list of n_targets such arrays
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lars(n_nonzero_coefs=1)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
n_nonzero_coefs=1, normalize=True, positive=False, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
See also
--------
lars_path, LarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, fit_intercept=True, verbose=False, normalize=True,
precompute='auto', n_nonzero_coefs=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.method = 'lar'
self.precompute = precompute
self.n_nonzero_coefs = n_nonzero_coefs
self.positive = positive
self.eps = eps
self.copy_X = copy_X
self.fit_path = fit_path
def _get_gram(self):
# precompute if n_samples > n_features
precompute = self.precompute
if hasattr(precompute, '__array__'):
Gram = precompute
elif precompute == 'auto':
Gram = 'auto'
else:
Gram = None
return Gram
def fit(self, X, y, Xy=None):
"""Fit the model using X, y as training data.
        Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \
optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
        Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True, multi_output=True)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize,
self.copy_X)
if y.ndim == 1:
y = y[:, np.newaxis]
n_targets = y.shape[1]
alpha = getattr(self, 'alpha', 0.)
if hasattr(self, 'n_nonzero_coefs'):
alpha = 0. # n_nonzero_coefs parametrization takes priority
max_iter = self.n_nonzero_coefs
else:
max_iter = self.max_iter
precompute = self.precompute
if not hasattr(precompute, '__array__') and (
precompute is True or
(precompute == 'auto' and X.shape[0] > X.shape[1]) or
(precompute == 'auto' and y.shape[1] > 1)):
Gram = np.dot(X.T, X)
else:
Gram = self._get_gram()
self.alphas_ = []
self.n_iter_ = []
if self.fit_path:
self.coef_ = []
self.active_ = []
self.coef_path_ = []
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, active, coef_path, n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=True,
return_n_iter=True, positive=self.positive)
self.alphas_.append(alphas)
self.active_.append(active)
self.n_iter_.append(n_iter_)
self.coef_path_.append(coef_path)
self.coef_.append(coef_path[:, -1])
if n_targets == 1:
self.alphas_, self.active_, self.coef_path_, self.coef_ = [
a[0] for a in (self.alphas_, self.active_, self.coef_path_,
self.coef_)]
self.n_iter_ = self.n_iter_[0]
else:
self.coef_ = np.empty((n_targets, n_features))
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, _, self.coef_[k], n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=False, return_n_iter=True,
positive=self.positive)
self.alphas_.append(alphas)
self.n_iter_.append(n_iter_)
if n_targets == 1:
self.alphas_ = self.alphas_[0]
self.n_iter_ = self.n_iter_[0]
self._set_intercept(X_mean, y_mean, X_std)
return self
class LassoLars(Lars):
"""Lasso model fit with Least Angle Regression a.k.a. Lars
It is a Linear Model trained with an L1 prior as regularizer.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by :class:`LinearRegression`. For numerical reasons, using
``alpha = 0`` with the LassoLars object is not advised and you
should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients will not converge
to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If ``True`` the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \
nodes in the path with correlation greater than ``alpha``, whichever \
is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) or list
If a list is passed it's expected to be one of n_targets such arrays.
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int.
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLars(alpha=0.01)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,
fit_path=True, max_iter=500, normalize=True, positive=False,
precompute='auto', verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -0.963257...]
See also
--------
lars_path
lasso_path
Lasso
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.method = 'lasso'
self.positive = positive
self.precompute = precompute
self.copy_X = copy_X
self.eps = eps
self.fit_path = fit_path
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
if copy or not array.flags.writeable:
return array.copy()
return array
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
copy=True, method='lars', verbose=False,
fit_intercept=True, normalize=True, max_iter=500,
eps=np.finfo(np.float).eps, positive=False):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : 'lar' | 'lasso'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : integer, optional
Sets the amount of verbosity
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
See reservations for using this option in combination with method
'lasso' for expected small values of alpha in the doc of LassoLarsCV
and LassoLarsIC.
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
--------
alphas : array, shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas)
Coefficients along the path
residues : array, shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
X_train = _check_copy_and_writeable(X_train, copy)
y_train = _check_copy_and_writeable(y_train, copy)
X_test = _check_copy_and_writeable(X_test, copy)
y_test = _check_copy_and_writeable(y_test, copy)
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
alphas, active, coefs = lars_path(
X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps,
positive=positive)
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
return alphas, active, coefs, residues.T
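# --- Illustrative sketch (not part of the original scikit-learn module).
# It shows how the residues returned by _lars_path_residues reduce to one mean
# squared error per alpha, which is what LarsCV.fit below interpolates over;
# the helper name `_demo_residues_to_mse` is hypothetical.
def _demo_residues_to_mse():
    rng = np.random.RandomState(0)
    X = rng.randn(30, 4)
    y = X[:, 1] + 0.05 * rng.randn(30)
    alphas, active, coefs, residues = _lars_path_residues(
        X[:20], y[:20], X[20:], y[20:], method='lasso')
    # residues has shape (n_alphas, n_test_samples): average the squared
    # residues over the test samples to get one MSE value per alpha
    mse_per_alpha = np.mean(residues ** 2, axis=-1)
    return alphas, mse_per_alpha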
class LarsCV(Lars):
"""Cross-validated Least Angle Regression model
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
    max_iter : integer, optional
Maximum number of iterations to perform.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
method = 'lar'
def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps,
copy_X=True, positive=False):
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.copy_X = copy_X
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
self.eps = eps
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=False)
Gram = 'auto' if self.precompute else None
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_lars_path_residues)(
X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
method=self.method, verbose=max(0, self.verbose - 1),
normalize=self.normalize, fit_intercept=self.fit_intercept,
max_iter=self.max_iter, eps=self.eps, positive=self.positive)
for train, test in cv)
all_alphas = np.concatenate(list(zip(*cv_paths))[0])
# Unique also sorts
all_alphas = np.unique(all_alphas)
# Take at most max_n_alphas values
stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
all_alphas = all_alphas[::stride]
mse_path = np.empty((len(all_alphas), len(cv_paths)))
for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
alphas = alphas[::-1]
residues = residues[::-1]
if alphas[0] != 0:
alphas = np.r_[0, alphas]
residues = np.r_[residues[0, np.newaxis], residues]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
residues = np.r_[residues, residues[-1, np.newaxis]]
this_residues = interpolate.interp1d(alphas,
residues,
axis=0)(all_alphas)
this_residues **= 2
mse_path[:, index] = np.mean(this_residues, axis=-1)
mask = np.all(np.isfinite(mse_path), axis=-1)
all_alphas = all_alphas[mask]
mse_path = mse_path[mask]
# Select the alpha that minimizes left-out error
i_best_alpha = np.argmin(mse_path.mean(axis=-1))
best_alpha = all_alphas[i_best_alpha]
# Store our parameters
self.alpha_ = best_alpha
self.cv_alphas_ = all_alphas
self.cv_mse_path_ = mse_path
# Now compute the full model
        # it will call a lasso internally when self is LassoLarsCV
# as self.method == 'lasso'
Lars.fit(self, X, y)
return self
@property
def alpha(self):
# impedance matching for the above Lars.fit (should not be documented)
return self.alpha_
class LassoLarsCV(LarsCV):
"""Cross-validated Lasso, using the LARS algorithm
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsCV only makes sense for problems where
a sparse solution is expected and/or reached.
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
Notes
-----
The object solves the same problem as the LassoCV object. However,
    unlike LassoCV, it finds the relevant alpha values by itself.
In general, because of this property, it will be more stable.
However, it is more fragile to heavily multicollinear datasets.
It is more efficient than the LassoCV if only a small number of
features are selected compared to the total number, for instance if
there are very few samples compared to the number of features.
See also
--------
lars_path, LassoLars, LarsCV, LassoCV
"""
method = 'lasso'
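# --- Illustrative usage sketch (not part of the original scikit-learn module).
# Fitting the cross-validated Lasso-LARS estimator on synthetic data; the
# helper name `_demo_lasso_lars_cv` is hypothetical.
def _demo_lasso_lars_cv():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 10)
    y = X[:, 0] - X[:, 3] + 0.1 * rng.randn(50)
    model = LassoLarsCV(cv=3).fit(X, y)
    # alpha_ is the regularization strength selected on the left-out folds
    return model.alpha_, model.coef_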
class LassoLarsIC(LassoLars):
"""Lasso model fit with Lars using BIC or AIC for model selection
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
AIC is the Akaike information criterion and BIC is the Bayes
Information criterion. Such criteria are useful to select the value
of the regularization parameter by making a trade-off between the
goodness of fit and the complexity of the model. A good model should
explain well the data while being simple.
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
criterion : 'bic' | 'aic'
The type of criterion to use.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsIC only makes sense for problems where
a sparse solution is expected and/or reached.
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform. Can be used for
early stopping.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
alpha_ : float
the alpha parameter chosen by the information criterion
n_iter_ : int
number of iterations run by lars_path to find the grid of
alphas.
criterion_ : array, shape (n_alphas,)
The value of the information criteria ('aic', 'bic') across all
alphas. The alpha which has the smallest information criteria
is chosen.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLarsIC(criterion='bic')
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,
max_iter=500, normalize=True, positive=False, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
Notes
-----
The estimation of the number of degrees of freedom is given by:
"On the degrees of freedom of the lasso"
Hui Zou, Trevor Hastie, and Robert Tibshirani
Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.
http://en.wikipedia.org/wiki/Akaike_information_criterion
http://en.wikipedia.org/wiki/Bayesian_information_criterion
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
def __init__(self, criterion='aic', fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, positive=False):
self.criterion = criterion
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.copy_X = copy_X
self.precompute = precompute
self.eps = eps
def fit(self, X, y, copy_X=True):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
training data.
y : array-like, shape (n_samples,)
target values.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
X, y, Xmean, ymean, Xstd = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
max_iter = self.max_iter
Gram = self._get_gram()
alphas_, active_, coef_path_, self.n_iter_ = lars_path(
X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
method='lasso', verbose=self.verbose, max_iter=max_iter,
eps=self.eps, return_n_iter=True, positive=self.positive)
n_samples = X.shape[0]
if self.criterion == 'aic':
K = 2 # AIC
elif self.criterion == 'bic':
K = log(n_samples) # BIC
else:
raise ValueError('criterion should be either bic or aic')
R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals
mean_squared_error = np.mean(R ** 2, axis=0)
df = np.zeros(coef_path_.shape[1], dtype=np.int) # Degrees of freedom
for k, coef in enumerate(coef_path_.T):
mask = np.abs(coef) > np.finfo(coef.dtype).eps
if not np.any(mask):
continue
# get the number of degrees of freedom equal to:
# Xc = X[:, mask]
            # Trace(Xc * inv(Xc.T * Xc) * Xc.T), i.e. the number of non-zero coefs
df[k] = np.sum(mask)
self.alphas_ = alphas_
with np.errstate(divide='ignore'):
self.criterion_ = n_samples * np.log(mean_squared_error) + K * df
n_best = np.argmin(self.criterion_)
self.alpha_ = alphas_[n_best]
self.coef_ = coef_path_[:, n_best]
self._set_intercept(Xmean, ymean, Xstd)
return self
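# --- Illustrative sketch (not part of the original scikit-learn module).
# A worked example of the criterion computed in LassoLarsIC.fit above,
# criterion = n_samples * log(mse) + K * df with K = 2 for AIC and
# K = log(n_samples) for BIC; the helper name is hypothetical.
def _demo_information_criterion(n_samples=100, mse=0.5, df=3):
    aic = n_samples * np.log(mse) + 2 * df
    bic = n_samples * np.log(mse) + log(n_samples) * df
    # with the defaults: aic ~ -63.3 and bic ~ -55.5; the smaller value wins
    return aic, bic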
|
bsd-3-clause
|
Batuu13/Neural-Networks
|
NN.py
|
1
|
5911
|
import numpy as np
import matplotlib.pyplot as plt
class Neural:
def __init__(self,lr = 0.0001,lh = 32,lw =4,it = 50000 , decay = 0.9999,precision = 8):
plt.ion()
# lw = Layer Width
# lh = Layer Height
# lr = Learning Rate
# it = Iteration Count
        # precision = iteration stops once the change in error between steps is smaller than 10**(-precision)
self.precision = 1 / (10 ** precision)
self.decay = decay
self.expected = None
self.prediction = None
self.data = None
self.lr2 = lr
self.lr = lr
self.layer_height = lh
self.layer_width = lw
self.iteration = it
self.weights = []
# initialize weights
np.random.seed(1)
self.initialize_weights()
def initialize_weights(self):
self.weights.append(2 * np.random.random((785, self.layer_height)) - 1)
for i in range(self.layer_width - 1):
self.weights.append(2 * np.random.random((self.layer_height, self.layer_height)) - 1)
self.weights.append(2 * np.random.random((self.layer_height, 10)) - 1)
# activation function (tanH)
def activate(self, x):
#output = 1 / (1 + np.exp(-x)) # Sigmoid
output = 2 / (1 + np.exp(-2 * x)) - 1 # TanH
return output
# derivative of activation function
def activate_d(self, x):
#return (1 - x) * x # Sigmoid
return 1 - x ** 2 # TanH
def fit(self, data, output):
# Forward Phase
self.data = np.zeros((np.array(data).shape[0],np.array(data).shape[1] + 1))
self.expected = output
bias = 1
for i in range(np.array(data).shape[0]):
self.data[i] = np.append(data[i],bias)
layers = []
layers_error = []
layers_delta = []
# Construct Layers >> input - hidden - output
        layers.append(self.activate(np.dot(self.data, self.weights[0])))
for k in range(1,self.layer_width + 1):
layers.append(self.activate(np.dot(layers[k - 1], self.weights[k])))
###################
# Calculate Errors And Deltas >> output - hidden - input
layers_error.append(self.expected - layers[-1])
layers_delta.append(layers_error[0] * self.activate_d(layers[-1]))
for k in range(0,self.layer_width):
layers_error.append(np.dot(layers_delta[k], self.weights[-(k+1)].T))
layers_delta.append(layers_error[k+1] * self.activate_d(layers[-(k+2)]))
###################
# Update Weights
delta = np.dot(self.data.T,layers_delta[-1])
self.weights[0] += np.multiply(delta, self.lr)
for k in range(0,len(self.weights) - 1):
delta = np.dot(layers[k].T,layers_delta[-(k+2)])
self.weights[k+1] += np.multiply(delta, self.lr)
last_error = np.mean(np.abs(layers[-1]))
###################
for i in range(self.iteration):
# Using decay in Learning Rate
self.lr *= self.decay
# Construct Layers >> input - hidden - output
layers[0] = self.activate(np.dot(self.data, self.weights[0]))
for k in range(1, self.layer_width + 1):
layers[k] = self.activate(np.dot(layers[k - 1], self.weights[k]))
###################
# Calculate Errors And Deltas >> output - hidden - input
layers_error[0] = (self.expected - layers[-1])
layers_delta[0] = (layers_error[0] * self.activate_d(layers[-1]))
for k in range(1, self.layer_width + 1):
layers_error[k] = (np.dot(layers_delta[k - 1], self.weights[-(k)].T))
layers_delta[k] = (layers_error[k] * self.activate_d(layers[-(k+1)]))
###################
# Update Weights
delta = np.dot(self.data.T, layers_delta[-1])
self.weights[0] += np.multiply(delta, self.lr)
for k in range(0, len(self.weights) - 1):
delta = np.dot(layers[k].T, layers_delta[-(k + 2)])
self.weights[k + 1] += np.multiply(delta, self.lr)
###################
if i % 2 == 0:
plt.plot(i, np.mean(np.abs(layers[-1])), 'ro')
plt.pause(0.001)
cur_error = np.mean(np.abs(layers[-1]))
if self.precision > abs(last_error - cur_error):
break
last_error = cur_error
self.prediction = layers[-1]
plt.draw()
plt.pause(1)
def _test(self, expected, prediction):
count = 0
for i in range(len(expected)):
if expected[i] == np.argmax(prediction[i]):
count += 1
return count / len(expected)
def get_train_error(self):
        '''
        Returns training accuracy: despite the method name, the value computed
        below is the fraction of training samples predicted correctly.
        :return: accuracy between 0 and 1
        '''
count = 0
for i in range(len(self.expected)):
if np.argmax(self.expected[i]) == np.argmax(self.prediction[i]):
count += 1
return count / len(self.expected)
def predict(self,data_raw,expected_results):
bias = 1
data = np.zeros((np.array(data_raw).shape[0], np.array(data_raw).shape[1] + 1))
for i in range(np.array(data_raw).shape[0]):
data[i] = np.append(data_raw[i], bias)
        cur_layer = self.activate(np.dot(data, self.weights[0]))
for k in range(1,self.layer_width + 1):
cur_layer = (self.activate(np.dot(cur_layer, self.weights[k])))
return self._test(expected_results,cur_layer)
def get_settings(self):
return [self.layer_width,self.layer_height,self.iteration,self.lr2]
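# --- Illustrative usage sketch (not part of the original file). Trains the
# network on a tiny fabricated dataset whose shapes match the 785-input /
# 10-output weights above; the data is random noise and the helper name
# `_demo_neural` is hypothetical, so the reported accuracy is meaningless.
def _demo_neural():
    np.random.seed(0)
    X = np.random.rand(20, 784)                     # 20 fake 28x28 "images"
    Y = np.eye(10)[np.random.randint(0, 10, 20)]    # one-hot targets
    net = Neural(lr=0.0001, lh=16, lw=2, it=100)
    net.fit(X, Y)
    # predict expects class indices, not one-hot vectors
    return net.predict(X, np.argmax(Y, axis=1))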
|
gpl-3.0
|
sufengniu/GVIN
|
irregular/IL/utils.py
|
1
|
7127
|
import numpy as np
import tensorflow as tf
from scipy.sparse import csr_matrix
import scipy.sparse as sparse
from matplotlib import pyplot as plt
import networkx as nx
# helper methods to print nice table (taken from CGT code)
def fmt_item(x, l):
if isinstance(x, np.ndarray):
assert x.ndim==0
x = x.item()
if isinstance(x, float): rep = "%g"%x
else: rep = str(x)
return " "*(l - len(rep)) + rep
def fmt_row(width, row):
out = " | ".join(fmt_item(x, width) for x in row)
return out
def flipkernel(kern):
return kern[(slice(None, None, -1),) * 2 + (slice(None), slice(None))]
def conv2d_flipkernel(x, k, name=None):
return tf.nn.conv2d(x, flipkernel(k), name=name,
strides=(1, 1, 1, 1), padding='SAME')
def in_bound(x , y, width, height):
if (x >= 0 and x < width and
y >= 0 and y < height ):
return True
else:
return False
def adjecent_matrix(width, height):
w0 = np.zeros([width * height, width * height])
w1 = np.zeros([width * height, width * height])
w2 = np.zeros([width * height, width * height])
w3 = np.zeros([width * height, width * height])
w4 = np.zeros([width * height, width * height])
w5 = np.zeros([width * height, width * height])
w6 = np.zeros([width * height, width * height])
w7 = np.zeros([width * height, width * height])
w8 = np.zeros([width * height, width * height])
for i in range(width * height):
x = i % width
        y = i // height  # row index; floor division keeps integer semantics under Python 3
if in_bound(x-1, y-1, width, height):
w0[i][i- width - 1] = 1
if in_bound(x, y-1, width, height):
w1[i][i- width] = 1
if in_bound(x+1, y-1, width, height):
w2[i][i- width + 1] = 1
if in_bound(x-1, y, width, height):
w3[i][i - 1] = 1
w4[i][i] = 1
if in_bound(x+1, y, width, height):
w5[i][i + 1] = 1
if in_bound(x-1, y+1, width, height):
w6[i][i+ width - 1] = 1
if in_bound(x, y+1, width, height):
w7[i][i+ width] = 1
if in_bound(x+1, y+1, width, height):
w8[i][i+ width + 1] = 1
return np.array([w0, w1, w2, w3, w4, w5, w6, w7, w8])
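# --- Illustrative sketch (not part of the original file). Sanity check of the
# construction above on a 3x3 grid: the centre cell (index 4) is connected to
# all 9 offsets (8 neighbours plus itself), a corner cell (index 0) only to the
# in-bound ones; the helper name `_demo_adjacency_check` is hypothetical.
def _demo_adjacency_check():
    ws = adjecent_matrix(3, 3)                 # shape (9, 9, 9): one matrix per offset
    centre_degree = sum(int(w[4].sum()) for w in ws)
    corner_degree = sum(int(w[0].sum()) for w in ws)
    return centre_degree, corner_degree        # expected: (9, 4)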
def sparse_rprn(w):
w_indx = np.stack([w.row, w.col])
w_indx = np.transpose(w_indx)
return [w_indx, w.data]
def adjecent_sparse(width, height):
w0 = np.zeros([width * height, width * height])
w1 = np.zeros([width * height, width * height])
w2 = np.zeros([width * height, width * height])
w3 = np.zeros([width * height, width * height])
w4 = np.zeros([width * height, width * height])
w5 = np.zeros([width * height, width * height])
w6 = np.zeros([width * height, width * height])
w7 = np.zeros([width * height, width * height])
w8 = np.zeros([width * height, width * height])
for i in range(width * height):
x = i % width
        y = i // height  # row index; floor division keeps integer semantics under Python 3
if in_bound(x-1, y-1, width, height):
w0[i][i- width - 1] = 1
if in_bound(x, y-1, width, height):
w1[i][i- width] = 1
if in_bound(x+1, y-1, width, height):
w2[i][i- width + 1] = 1
if in_bound(x-1, y, width, height):
w3[i][i - 1] = 1
w4[i][i] = 1
if in_bound(x+1, y, width, height):
w5[i][i + 1] = 1
if in_bound(x-1, y+1, width, height):
w6[i][i+ width - 1] = 1
if in_bound(x, y+1, width, height):
w7[i][i+ width] = 1
if in_bound(x+1, y+1, width, height):
w8[i][i+ width + 1] = 1
w = []
w.append(sparse.coo_matrix(w0))
w.append(sparse.coo_matrix(w1))
w.append(sparse.coo_matrix(w2))
w.append(sparse.coo_matrix(w3))
w.append(sparse.coo_matrix(w4))
w.append(sparse.coo_matrix(w5))
w.append(sparse.coo_matrix(w6))
w.append(sparse.coo_matrix(w7))
w.append(sparse.coo_matrix(w8))
w_return = []
for i in range(len(w)):
w_return.append(sparse_rprn(w[i]))
return w_return
def nx_plot(adj, pos, value):
# input: adjacent matrix, position, value map
label = np.arange(len(pos))
G=nx.from_numpy_matrix(adj)
nodes = nx.draw_networkx_nodes(G, pos, node_color=value, node_size=200)
nodes.set_edgecolor('black')
nx.draw_networkx_labels(G, pos, font_size=10)
nx.draw_networkx_edges(G, pos, width=1.0)
plt.ion()
plt.show()
def nx_group(adj, pos, value):
for i in range(10):
plt.figure()
nx_plot(adj, pos, value[i,:,0])
def extract_label(theta_matrix, start_pos, label_pos, discrete=True):
if discrete :
labels = []
for i in range(len(start_pos)):
label = []
tmp = theta_matrix[i].toarray()[start_pos[i], label_pos[i]]
for j in range(len(tmp)):
if tmp[j] <= np.pi/8 or tmp[j] > 15*np.pi/8:
label.append(0)
elif tmp[j] <= 3*np.pi/8 and tmp[j] > np.pi/8:
label.append(1)
elif tmp[j] <= 5*np.pi/8 and tmp[j] > 3*np.pi/8:
label.append(2)
elif tmp[j] <= 7*np.pi/8 and tmp[j] > 5*np.pi/8:
label.append(3)
elif tmp[j] <= 9*np.pi/8 and tmp[j] > 7*np.pi/8:
label.append(4)
elif tmp[j] <= 11*np.pi/8 and tmp[j] > 9*np.pi/8:
label.append(5)
elif tmp[j] <= 13*np.pi/8 and tmp[j] > 11*np.pi/8:
label.append(6)
elif tmp[j] <= 15*np.pi/8 and tmp[j] > 13*np.pi/8:
label.append(7)
labels.append(label)
labels = np.array(labels)
else :
labels = []
for i in range(len(start_pos)):
labels.append(theta_matrix[i].toarray()[start_pos[i], label_pos[i]])
labels = np.array(labels)
return labels
def coord_matrix(coord, coord_diff, config):
coord_matrix = []
for i in range(coord.shape[0]):
coord_tmp = []
for j in range(coord.shape[1]):
coord_rep = np.repeat(coord[i,j,:], config.nodesize).reshape(2,config.nodesize).transpose()
coord_g_rep = np.repeat(coord[i,0,:], config.nodesize).reshape(2,config.nodesize).transpose()
# [X_i, X_j, X_i-X_j, X_g]
coord_tmp.append(np.concatenate([coord_rep, coord[i,:,:], coord_diff[i,j,:,:], coord_g_rep], axis=-1))
# coord_tmp.append(np.concatenate([coord_rep, coord[i,:,:], coord_g_rep], axis=-1))
coord_matrix.append(np.array(coord_tmp))
return np.array(coord_matrix)
# def extract_pred(output, config):
# if config.discrete is True:
# o_ = output.
# else:
# o_ = output.reshape(-1, config.statebatchsize)
# pred = []
# for m in range(config.batchsize):
# buf = theta_train[m, start_train[m,:,0], :]
# for n in range(config.statebatchsize):
# pred.append(np.argmin(np.absolute(o_[m,n] - buf[n,buf[n].nonzero()])))
# pred = np.array(pred)
# pred_label = label_train[i:j].reshape(-1)
# e_ = (np.sum(pred != pred_label))/(1.0*batch_size*config.statebatchsize)
|
mit
|
peterfpeterson/mantid
|
qt/python/mantidqt/widgets/workspacedisplay/table/io.py
|
3
|
2629
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantidqt package.
from mantid.api import AnalysisDataService as ADS # noqa
from mantidqt.widgets.workspacedisplay.table.error_column import ErrorColumn
from mantidqt.widgets.workspacedisplay.table.presenter import TableWorkspaceDisplay
class TableWorkspaceDisplayAttributes(object):
# WARNING: If you delete a tag from here instead of adding a new one, it will make old project files obsolete so
# just add an extra tag to the list e.g. ["InstrumentWidget", "IWidget"]
_tags = ["TableWorkspaceDisplayView"]
class TableWorkspaceDisplayEncoder(TableWorkspaceDisplayAttributes):
def __init__(self):
super(TableWorkspaceDisplayEncoder, self).__init__()
def encode(self, obj, _=None):
obj = obj.presenter.view
return {"workspace": obj.presenter.model.ws.name(),
"markedColumns": self._encode_marked_columns(obj.presenter.model.marked_columns),
"windowName": obj.presenter.name}
@staticmethod
def _encode_marked_columns(marked_columns):
as_y_err = []
for y_err in marked_columns.as_y_err:
as_y_err.append({"column": y_err.column, "relatedY": y_err.related_y_column})
return {"as_x": marked_columns.as_x, "as_y": marked_columns.as_y, "as_y_err": as_y_err}
@classmethod
def tags(cls):
return cls._tags
class TableWorkspaceDisplayDecoder(TableWorkspaceDisplayAttributes):
def __init__(self):
super(TableWorkspaceDisplayDecoder, self).__init__()
@staticmethod
def decode(obj_dic, _=None):
import matplotlib.pyplot as plt
pres = TableWorkspaceDisplay(ADS.retrieve(obj_dic["workspace"]), name=obj_dic["windowName"], plot=plt)
pres.model.marked_columns.as_x = obj_dic["markedColumns"]["as_x"]
pres.model.marked_columns.as_y = obj_dic["markedColumns"]["as_y"]
error_columns = []
for y_err in obj_dic["markedColumns"]["as_y_err"]:
error_columns.append(ErrorColumn(column=y_err["column"],
related_y_column=y_err["relatedY"]))
pres.model.marked_columns.as_y_err = error_columns
pres.presenter.update_column_headers()
return pres.container
@classmethod
def tags(cls):
return cls._tags
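# --- Illustrative sketch (not part of the original file). The dictionary shape
# produced by TableWorkspaceDisplayEncoder.encode and consumed by
# TableWorkspaceDisplayDecoder.decode above; the concrete values are made up.
_EXAMPLE_ENCODED_TABLE_DISPLAY = {
    "workspace": "my_table_ws",          # name of a workspace registered in the ADS
    "markedColumns": {
        "as_x": [0],
        "as_y": [1, 2],
        "as_y_err": [{"column": 3, "relatedY": 1}],
    },
    "windowName": "my_table_ws",
}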
|
gpl-3.0
|
maropu/spark
|
python/pyspark/pandas/data_type_ops/binary_ops.py
|
2
|
2976
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import TYPE_CHECKING, Union
from pandas.api.types import CategoricalDtype
from pyspark.pandas.base import column_op, IndexOpsMixin
from pyspark.pandas.data_type_ops.base import (
DataTypeOps,
T_IndexOps,
_as_bool_type,
_as_categorical_type,
_as_other_type,
_as_string_type,
)
from pyspark.pandas.typedef import Dtype, pandas_on_spark_type
from pyspark.sql import functions as F
from pyspark.sql.types import BinaryType, BooleanType, StringType
if TYPE_CHECKING:
from pyspark.pandas.indexes import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
class BinaryOps(DataTypeOps):
"""
The class for binary operations of pandas-on-Spark objects with BinaryType.
"""
@property
def pretty_name(self) -> str:
return "binaries"
def add(self, left, right) -> Union["Series", "Index"]:
if isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, BinaryType):
return column_op(F.concat)(left, right)
elif isinstance(right, bytes):
return column_op(F.concat)(left, F.lit(right))
else:
raise TypeError(
"Concatenation can not be applied to %s and the given type." % self.pretty_name
)
def radd(self, left, right) -> Union["Series", "Index"]:
if isinstance(right, bytes):
return left._with_new_scol(F.concat(F.lit(right), left.spark.column))
else:
raise TypeError(
"Concatenation can not be applied to %s and the given type." % self.pretty_name
)
def astype(self, index_ops: T_IndexOps, dtype: Union[str, type, Dtype]) -> T_IndexOps:
dtype, spark_type = pandas_on_spark_type(dtype)
if isinstance(dtype, CategoricalDtype):
return _as_categorical_type(index_ops, dtype, spark_type)
elif isinstance(spark_type, BooleanType):
return _as_bool_type(index_ops, dtype)
elif isinstance(spark_type, StringType):
return _as_string_type(index_ops, dtype)
else:
return _as_other_type(index_ops, dtype, spark_type)
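# --- Illustrative usage sketch (not part of the original module). How the
# add/radd hooks above surface to users of pandas-on-Spark; it assumes a
# running SparkSession and that `ps` is the pandas-on-Spark namespace.
#
#   import pyspark.pandas as ps
#   s = ps.Series([b"1", b"2", b"3"])
#   s + b"x"    # BinaryOps.add: elementwise concat -> b"1x", b"2x", b"3x"
#   b"x" + s    # BinaryOps.radd: prepends          -> b"x1", b"x2", b"x3"
#   s + 1       # raises TypeError, as implemented above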
|
apache-2.0
|
lukeiwanski/tensorflow-opencl
|
tensorflow/contrib/learn/python/learn/dataframe/queues/feeding_functions.py
|
2
|
12905
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for enqueuing data from arrays and pandas `DataFrame`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_queue_runner as fqr
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import queue_runner
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
class _ArrayFeedFn(object):
"""Creates feed dictionaries from numpy arrays."""
def __init__(self,
placeholders,
array,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != 2:
raise ValueError("_array_feed_fn expects 2 placeholders; got {}.".format(
len(placeholders)))
self._placeholders = placeholders
self._array = array
self._max = len(array)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
if self._num_epochs and self._epoch >= self._num_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % self._epoch)
integer_indexes = [
j % self._max for j in range(self._trav, self._trav + self._batch_size)
]
if self._epoch_end in integer_indexes:
# after this batch we will have processed self._epoch epochs, possibly
# overshooting a bit to fill out a batch.
self._epoch += 1
self._trav = (integer_indexes[-1] + 1) % self._max
return {
self._placeholders[0]: integer_indexes,
self._placeholders[1]: self._array[integer_indexes]
}
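# --- Illustrative sketch (not part of the original module). The wrap-around
# index arithmetic shared by the feed functions in this file, shown standalone:
# a batch is a contiguous slice of indices modulo the array length, and an
# epoch ends whenever the precomputed `epoch_end` index is emitted; the helper
# name `_demo_wraparound_indexes` is hypothetical.
def _demo_wraparound_indexes(n_rows=5, batch_size=3, start=4):
  epoch_end = (start - 1) % n_rows
  batch = [j % n_rows for j in range(start, start + batch_size)]
  # with the defaults: batch == [4, 0, 1] and epoch_end == 3
  return batch, epoch_end in batch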
class _OrderedDictNumpyFeedFn(object):
"""Creates feed dictionaries from `OrderedDict`s of numpy arrays."""
def __init__(self,
placeholders,
ordered_dict_of_arrays,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(ordered_dict_of_arrays) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(ordered_dict_of_arrays), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._ordered_dict_of_arrays = ordered_dict_of_arrays
self._max = len(next(iter(ordered_dict_of_arrays.values())))
for _, v in ordered_dict_of_arrays.items():
if len(v) != self._max:
raise ValueError("Array lengths must match.")
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
if self._num_epochs is not None and self._epoch >= self._num_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % self._epoch)
indices_end = self._trav + self._batch_size
# _num_epochs will not be <= 0; otherwise, the OutOfRangeError has been
# raised already.
if self._num_epochs is not None and self._epoch == self._num_epochs - 1:
# If num_epochs is set and the feed_fn is on the final epoch, the end
# index of the next batch should not exceed the epoch_end.
if self._trav <= self._epoch_end:
epoch_end = self._epoch_end
else:
epoch_end = self._max + self._epoch_end
indices_end = min(epoch_end + 1, indices_end)
# The integer indices for next batch.
integer_indexes = [j % self._max for j in range(self._trav, indices_end)]
if self._epoch_end in integer_indexes:
# after this batch we will have processed self._epoch epochs, possibly
# overshooting a bit to fill out a batch.
self._epoch += 1
self._trav = (integer_indexes[-1] + 1) % self._max
feed_dict = {self._index_placeholder: integer_indexes}
cols = [
column[integer_indexes]
for column in self._ordered_dict_of_arrays.values()
]
feed_dict.update(dict(zip(self._col_placeholders, cols)))
return feed_dict
class _PandasFeedFn(object):
"""Creates feed dictionaries from pandas `DataFrames`."""
def __init__(self,
placeholders,
dataframe,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(dataframe.columns) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(dataframe.columns), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._dataframe = dataframe
self._max = len(dataframe)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
if self._num_epochs and self._epoch >= self._num_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % self._epoch)
integer_indexes = [
j % self._max for j in range(self._trav, self._trav + self._batch_size)
]
if self._epoch_end in integer_indexes:
# after this batch we will have processed self._epoch epochs, possibly
# overshooting a bit to fill out a batch.
self._epoch += 1
if self._epoch == self._num_epochs:
# trim this batch, so as not to overshoot the last epoch.
batch_end_inclusive = integer_indexes.index(self._epoch_end)
integer_indexes = integer_indexes[:(batch_end_inclusive + 1)]
self._trav = (integer_indexes[-1] + 1) % self._max
result = self._dataframe.iloc[integer_indexes]
cols = [result[col].values for col in result.columns]
feed_dict = dict(zip(self._col_placeholders, cols))
feed_dict[self._index_placeholder] = result.index.values
return feed_dict
def enqueue_data(data,
capacity,
shuffle=False,
min_after_dequeue=None,
num_threads=1,
seed=None,
name="enqueue_input",
enqueue_size=1,
num_epochs=None):
"""Creates a queue filled from a numpy array or pandas `DataFrame`.
Returns a queue filled with the rows of the given (`OrderedDict` of) array
or `DataFrame`. In the case of a pandas `DataFrame`, the first enqueued
`Tensor` corresponds to the index of the `DataFrame`. For (`OrderedDict` of)
numpy arrays, the first enqueued `Tensor` contains the row number.
Args:
data: a numpy `ndarray`, `OrderedDict` of numpy arrays, or pandas
`DataFrame` that will be read into the queue.
capacity: the capacity of the queue.
shuffle: whether or not to shuffle the rows of the array.
min_after_dequeue: minimum number of elements that can remain in the queue
after a dequeue operation. Only used when `shuffle` is true. If not set,
defaults to `capacity` / 4.
num_threads: number of threads used for reading and enqueueing.
seed: used to seed shuffling and reader starting points.
name: a scope name identifying the data.
enqueue_size: the number of rows to enqueue per step.
num_epochs: limit enqueuing to a specified number of epochs, if provided.
Returns:
A queue filled with the rows of the given (`OrderedDict` of) array or
`DataFrame`.
Raises:
TypeError: `data` is not a Pandas `DataFrame`, an `OrderedDict` of numpy
arrays or a numpy `ndarray`.
"""
with ops.name_scope(name):
if isinstance(data, np.ndarray):
types = [dtypes.int64, dtypes.as_dtype(data.dtype)]
queue_shapes = [(), data.shape[1:]]
get_feed_fn = _ArrayFeedFn
elif isinstance(data, collections.OrderedDict):
types = [dtypes.int64] + [
dtypes.as_dtype(col.dtype) for col in data.values()
]
queue_shapes = [()] + [col.shape[1:] for col in data.values()]
get_feed_fn = _OrderedDictNumpyFeedFn
elif HAS_PANDAS and isinstance(data, pd.DataFrame):
types = [
dtypes.as_dtype(dt) for dt in [data.index.dtype] + list(data.dtypes)
]
queue_shapes = [() for _ in types]
get_feed_fn = _PandasFeedFn
else:
raise TypeError(
"data must be either a numpy array or pandas DataFrame if pandas is "
"installed; got {}".format(type(data).__name__))
# TODO(jamieas): TensorBoard warnings for all warnings below once available.
if num_threads > 1 and num_epochs is not None:
logging.warning(
"enqueue_data was called with num_epochs and num_threads > 1. "
"num_epochs is applied per thread, so this will produce more "
"epochs than you probably intend. "
"If you want to limit epochs, use one thread.")
if shuffle and num_threads > 1 and num_epochs is not None:
logging.warning(
"enqueue_data was called with shuffle=True, num_threads > 1, and "
"num_epochs. This will create multiple threads, all reading the "
"array/dataframe in order adding to the same shuffling queue; the "
"results will likely not be sufficiently shuffled.")
if not shuffle and num_threads > 1:
logging.warning(
"enqueue_data was called with shuffle=False and num_threads > 1. "
"This will create multiple threads, all reading the "
"array/dataframe in order. If you want examples read in order, use"
" one thread; if you want multiple threads, enable shuffling.")
if shuffle:
min_after_dequeue = int(capacity / 4 if min_after_dequeue is None else
min_after_dequeue)
queue = data_flow_ops.RandomShuffleQueue(
capacity,
min_after_dequeue,
dtypes=types,
shapes=queue_shapes,
seed=seed)
else:
min_after_dequeue = 0 # just for the summary text
queue = data_flow_ops.FIFOQueue(
capacity, dtypes=types, shapes=queue_shapes)
enqueue_ops = []
feed_fns = []
for i in range(num_threads):
# Note the placeholders have no shapes, so they will accept any
# enqueue_size. enqueue_many below will break them up.
placeholders = [array_ops.placeholder(t) for t in types]
enqueue_ops.append(queue.enqueue_many(placeholders))
seed_i = None if seed is None else (i + 1) * seed
feed_fns.append(
get_feed_fn(
placeholders,
data,
enqueue_size,
random_start=shuffle,
seed=seed_i,
num_epochs=num_epochs))
runner = fqr.FeedingQueueRunner(
queue=queue, enqueue_ops=enqueue_ops, feed_fns=feed_fns)
queue_runner.add_queue_runner(runner)
full = (math_ops.cast(
math_ops.maximum(0, queue.size() - min_after_dequeue),
dtypes.float32) * (1. / (capacity - min_after_dequeue)))
# Note that name contains a '/' at the end so we intentionally do not place
# a '/' after %s below.
summary_name = ("queue/%sfraction_over_%d_of_%d_full" %
(queue.name, min_after_dequeue,
capacity - min_after_dequeue))
summary.scalar(summary_name, full)
return queue
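# ----------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# It assumes TF 1.x graph mode; the DataFrame contents, capacity and batch
# size below are illustrative values only.
def _enqueue_data_example():
  import pandas as pd
  import tensorflow as tf
  df = pd.DataFrame({"a": np.arange(8, dtype=np.int64),
                     "b": np.linspace(0., 1., 8)})
  q = enqueue_data(df, capacity=32, shuffle=False, num_threads=1)
  # For a DataFrame the first dequeued tensor is the index, then the columns.
  index, col_a, col_b = q.dequeue_many(4)
  with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    print(sess.run([index, col_a, col_b]))
    coord.request_stop()
    coord.join(threads)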
|
apache-2.0
|
CASIANICA/brainDecodingToolbox
|
braincode/prf/tfprf.py
|
1
|
57805
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES']='0'
import numpy as np
from math import log
from scipy.misc import imresize
import tables
import tensorflow as tf
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from braincode.util import configParser
from braincode.prf import dataio
def reconstructor(gabor_bank, vxl_coding_paras, y):
"""Stimuli reconstructor based on Activation Maximization"""
# var for input stimuli
img = tf.Variable(tf.random_normal([1, 500, 500, 1], stddev=0.001),
name="image")
# config for the gabor filters
gabor_real = np.expand_dims(gabor_bank['gabor_real'], 2)
gabor_imag = np.expand_dims(gabor_bank['gabor_imag'], 2)
real_conv = tf.nn.conv2d(img, gabor_real, strides=[1, 1, 1, 1],
padding='SAME')
imag_conv = tf.nn.conv2d(img, gabor_imag, strides=[1, 1, 1, 1],
padding='SAME')
gabor_energy = tf.sqrt(tf.square(real_conv) + tf.square(imag_conv))
# reshape gabor energy for pRF masking
gabor_vtr = tf.reshape(gabor_energy, [250000, 72])
# weighted by voxel encoding models
vxl_masks = vxl_coding_paras['masks']
vxl_wts = vxl_coding_paras['wts']
vxl_bias = vxl_coding_paras['bias']
# masked by pooling fields
vxl_masks = vxl_masks.reshape(-1, 250000)
vxl_feats = tf.matmul(vxl_masks, gabor_vtr)
vxl_wt_feats = tf.multiply(vxl_feats, vxl_wts)
vxl_rsp = tf.reduce_sum(vxl_wt_feats, axis=1)
vxl_pred = vxl_rsp - vxl_bias
# input config
vxl_real = tf.placeholder(tf.float32,
shape=(vxl_coding_paras['bias'].shape[0],))
error = tf.reduce_mean(tf.square(vxl_pred - vxl_real))
opt = tf.train.GradientDescentOptimizer(0.5)
vars_x = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "image")
solver = opt.minimize(error, var_list = vars_x)
# training
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.95
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
print y[:,2].shape
for step in range(500):
_, error_curr, reconstructed_img = sess.run([solver, error, img], feed_dict={vxl_real: y[:, 2]})
if step % 100 == 0:
print('Iter: {}; loss: {:.4}'.format(step, error_curr))
fig=plt.figure()
plt.imshow(reconstructed_img.reshape(500, 500))
plt.savefig('recons'+str(step)+'.png')
plt.close(fig)
return reconstructed_img
def model_test(input_imgs, gabor_bank, vxl_coding_paras):
"""pRF encoding model tests."""
# var for input stimuli
img = tf.placeholder("float", shape=[None, 500, 500, 1])
# config for the gabor filters
gabor_real = np.expand_dims(gabor_bank['gabor_real'], 2)
gabor_imag = np.expand_dims(gabor_bank['gabor_imag'], 2)
real_conv = tf.nn.conv2d(img, gabor_real, strides=[1, 1, 1, 1],
padding='SAME')
imag_conv = tf.nn.conv2d(img, gabor_imag, strides=[1, 1, 1, 1],
padding='SAME')
gabor_energy = tf.sqrt(tf.square(real_conv) + tf.square(imag_conv))
# reshape gabor energy for pRF masking
gabor_vtr = tf.reshape(gabor_energy, [250000, 72])
# weighted by voxel encoding models
vxl_masks = vxl_coding_paras['masks']
vxl_wts = vxl_coding_paras['wts']
vxl_bias = vxl_coding_paras['bias']
# masked by pooling fields
vxl_masks = vxl_masks.reshape(-1, 250000)
vxl_feats = tf.matmul(vxl_masks, gabor_vtr)
vxl_wt_feats = tf.multiply(vxl_feats, vxl_wts)
vxl_rsp = tf.reduce_sum(vxl_wt_feats, axis=1)
vxl_out = vxl_rsp - vxl_bias
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
for i in range(input_imgs.shape[2]):
x = input_imgs[..., i].T
x = np.expand_dims(x, 0)
x = np.expand_dims(x, 3)
resp = sess.run(vxl_out, feed_dict={img: x})
print resp
def variable_summaries(var):
"""Attach a lot of summaries to Tensor for TensorBoard visualization."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
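# Usage sketch (added for illustration): attach summaries to any tensor of
# interest before merging, e.g.
#   w = tf.Variable(tf.zeros([10]), name='w')
#   variable_summaries(w)
#   merged = tf.summary.merge_all()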
def tfprf_laplacian_bn(input_imgs, vxl_rsp, gabor_bank, vxl_dir):
"""laplacian regularized pRF model."""
# get image mask
img_m = np.mean(input_imgs, axis=2)
img_mask = imresize(img_m, (250, 250))
# resized image value range: 0-255
img_mask = np.reshape(img_mask<170, [-1])
graph = tf.Graph()
with graph.as_default():
# vars for input data
with tf.name_scope('input'):
img = tf.placeholder("float", [None, 500, 500, 1], name='input-img')
rsp_ = tf.placeholder("float", [None,], name='vxl-rsp')
# var for feature pooling field
with tf.name_scope('pooling-field'):
fpf_kernel = tf.random_normal([1, 250, 250, 1], stddev=0.01)
blur = np.array([[1.0/256, 4.0/256, 6.0/256, 4.0/256, 1.0/256],
[4.0/256, 16.0/256, 24.0/256, 16.0/256, 4.0/256],
[6.0/256, 24.0/256, 36.0/256, 24.0/256, 6.0/256],
[4.0/256, 16.0/256, 24.0/256, 16.0/256, 4.0/256],
[1.0/256, 4.0/256, 6.0/256, 4.0/256, 1.0/256]])
blur = np.expand_dims(np.expand_dims(blur, 2), 3)
fpf_kernel = tf.nn.conv2d(fpf_kernel, blur, strides=[1, 1, 1, 1],
padding='SAME')
fpf = tf.Variable(tf.reshape(fpf_kernel, [250, 250]), name='fpf')
flat_fpf = tf.transpose(tf.boolean_mask(tf.reshape(tf.nn.relu(fpf),
(62500, 1)),
img_mask), [1, 0])
# gabor features extraction
with tf.name_scope('feature-extract'):
feat_vtr = []
for i in range(9):
# config for the gabor filters
gabor_real = np.expand_dims(gabor_bank['f%s_real'%(i+1)], 2)
gabor_imag = np.expand_dims(gabor_bank['f%s_imag'%(i+1)], 2)
rconv = tf.nn.conv2d(img, gabor_real, strides=[1, 2, 2, 1],
padding='SAME')
iconv = tf.nn.conv2d(img, gabor_imag, strides=[1, 2, 2, 1],
padding='SAME')
gabor_energy = tf.sqrt(tf.square(rconv) + tf.square(iconv))
# normalization across orientations
gabor_energy = tf.nn.local_response_normalization(gabor_energy,
depth_radius=7, bias=1, alpha=1, beta=0.5)
gabor_energy = tf.transpose(gabor_energy, perm=[1, 2, 3, 0])
gabor_energy = tf.boolean_mask(tf.reshape(gabor_energy,
[62500, -1]), img_mask)
# get feature summary from pooling field
gabor_feat = tf.reshape(tf.matmul(flat_fpf, gabor_energy),
(8, -1))
feat_vtr.append(gabor_feat)
# concatenate gabor features within fpf
vxl_feats = tf.concat(feat_vtr, 0)
# vars for feature weights
with tf.name_scope('weighted-features'):
b = tf.Variable(tf.constant(0.01, shape=[1]), name='bias')
variable_summaries(b)
w = tf.Variable(tf.constant(0.01, shape=[1, 72]), name='weights')
variable_summaries(w)
vxl_wt_feats = tf.matmul(w, vxl_feats)
rsp = tf.reshape(vxl_wt_feats + b, [-1])
        # loss definition
with tf.name_scope('loss'):
# calculate fitting error
error = tf.reduce_mean(tf.square(rsp - rsp_))
# laplacian regularization
laplacian_kernel = np.array([[0, -1, 0],
[-1, 4, -1],
[0, -1, 0]])
laplacian_kernel = np.expand_dims(laplacian_kernel, 2)
laplacian_kernel = np.expand_dims(laplacian_kernel, 3)
fpf_shadow = tf.expand_dims(tf.expand_dims(fpf, 0), 3)
laplacian_error = tf.reduce_sum(tf.square(tf.nn.conv2d(fpf_shadow,
laplacian_kernel,
strides=[1, 1, 1, 1],
padding='VALID')))
# get total error
total_error = 10*error + 0.5*laplacian_error
tf.summary.scalar('fitting-loss', error)
tf.summary.scalar('total-loss', total_error)
# for model saving
saver = tf.train.Saver()
with tf.Session(graph=graph) as sess:
vars_x = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
solver = tf.train.AdamOptimizer(0.0005).minimize(total_error,
var_list = vars_x)
# merge summaries
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(os.path.join(vxl_dir, 'train'),
sess.graph)
#test_writer = tf.summary.FileWriter('./test')
sess.run(tf.global_variables_initializer())
# data splitting
input_imgs = input_imgs - np.expand_dims(img_m, 2)
sample_num = input_imgs.shape[2]
train_imgs = input_imgs[..., :int(sample_num*0.9)]
val_imgs = input_imgs[..., int(sample_num*0.9):]
val_imgs = np.transpose(val_imgs, (2, 0, 1))
val_imgs = np.expand_dims(val_imgs, 3)
train_rsp = vxl_rsp[:int(sample_num*0.9)]
val_rsp = vxl_rsp[int(sample_num*0.9):]
#print train_imgs.shape
#print val_imgs.shape
#print train_rsp.shape
#print val_rsp.shape
# model training
batch_size = 9
index_in_epoch = 0
epochs_completed = 0
min_err = None
err_patience_1 = 0.0025
err_patience_2 = 0.001
err_patience = err_patience_1
patience_cnt = 0
patience = 6
training_stage = 1
iter_num = 0
val_loss = []
while 1:
start = index_in_epoch
if epochs_completed==0 and start==0:
perm0 = np.arange(train_imgs.shape[2])
np.random.shuffle(perm0)
shuffle_imgs = train_imgs[..., perm0]
shuffle_rsp = train_rsp[perm0]
# go to next epoch
if start + batch_size > train_imgs.shape[2]:
# finish epoch
epochs_completed += 1
# get the rest examples in this epoch
rest_num_examples = int(train_imgs.shape[2]) - start
img_rest_part = shuffle_imgs[..., start:train_imgs.shape[2]]
rsp_rest_part = shuffle_rsp[start:train_imgs.shape[2]]
# shuffle the data
perm = np.arange(train_imgs.shape[2])
np.random.shuffle(perm)
shuffle_imgs = train_imgs[..., perm]
shuffle_rsp = train_rsp[perm]
# start next epoch
start = 0
index_in_epoch = batch_size - rest_num_examples
end = index_in_epoch
img_new_part = shuffle_imgs[..., start:end]
rsp_new_part = shuffle_rsp[start:end]
img_batch = np.concatenate((img_rest_part,img_new_part), axis=2)
img_batch = np.transpose(img_batch, (2, 0, 1))
img_batch = np.expand_dims(img_batch, 3)
batch = [img_batch,
np.concatenate((rsp_rest_part, rsp_new_part), axis=0)]
else:
index_in_epoch += batch_size
end = index_in_epoch
img_batch = shuffle_imgs[..., start:end]
img_batch = np.transpose(img_batch, (2, 0, 1))
img_batch = np.expand_dims(img_batch, 3)
batch = [img_batch, shuffle_rsp[start:end]]
_, summary, step_error, step_fpf = sess.run(
[solver, merged, total_error, fpf],
feed_dict={img: batch[0], rsp_: batch[1]})
train_writer.add_summary(summary, iter_num)
if (iter_num+1)%175==0:
print 'Ep %s'%((iter_num+1)/175)
print 'Training Error: %s'%(step_error)
rsp_err = sess.run(error, feed_dict={img: batch[0],
rsp_: batch[1]})
#l2_err = sess.run(l2_error, feed_dict={img:batch[0],
# rsp_: batch[1]})
lap_err = sess.run(laplacian_error, feed_dict={img:batch[0],
rsp_: batch[1]})
#l1_err = sess.run(l1_error, feed_dict={img:batch[0],
# rsp_: batch[1]})
print 'Rsp error: %s'%(rsp_err)
#print 'L2 error: %s'%(l2_err)
print 'Laplacian error: %s'%(lap_err)
#print 'L1 error: %s'%(l1_err)
# model validation
pred_val_rsp = np.zeros(175)
for j in range(35):
part_rsp = sess.run(rsp,
feed_dict={img: val_imgs[(j*5):(j*5+5)],
rsp_: val_rsp[(j*5):(j*5+5)]})
pred_val_rsp[(j*5):(j*5+5)] = part_rsp
val_err = np.mean(np.square(pred_val_rsp - val_rsp))
print 'Validation Error: %s'%(val_err)
val_loss.append(val_err)
#val_corr = np.corrcoef(pred_val_rsp, val_rsp)[0, 1]
#print 'Validation Corr: %s'%(val_corr)
if iter_num==174:
min_err = val_err
else:
if (min_err - val_err) >= err_patience:
min_err = val_err
patience_cnt = 0
else:
patience_cnt += 1
# stop signal
if patience_cnt > patience:
if training_stage==1 and min_err<0.999:
training_stage = 2
patience_cnt = 0
err_patience = err_patience_2
print '################################'
print 'Enter training stage 2 ...'
else:
print 'Early stopping - step %s'%(iter_num)
# plot fpf
fig, ax = plt.subplots()
cax = ax.imshow(step_fpf, cmap='gray')
fig.colorbar(cax)
plt.savefig(os.path.join(vxl_dir,
'fpf_epoch%s.png'%((iter_num+1)/175)))
plt.close(fig)
# save model
saver.save(sess, os.path.join(vxl_dir, 'prf_model'),
global_step=(iter_num - (patience+1)*175))
saver.save(sess, os.path.join(vxl_dir, 'prf_model'),
global_step=iter_num, write_meta_graph=False)
# save final validation loss
with open(os.path.join(vxl_dir, 'val_loss.txt'), 'w+') as f:
val_idx = -1 * patience - 2
f.write('%s\n'%(val_loss[val_idx]))
break
iter_num += 1
train_writer.close()
#test_writer.close()
return
def tfprf_laplacian(input_imgs, vxl_rsp, gabor_bank, vxl_dir):
"""laplacian regularized pRF model."""
# get image mask
img_m = np.mean(input_imgs, axis=2)
img_mask = imresize(img_m, (250, 250))
# resized image value range: 0-255
img_mask = np.reshape(img_mask<170, [-1])
graph = tf.Graph()
with graph.as_default():
# vars for input data
with tf.name_scope('input'):
img = tf.placeholder("float", [None, 500, 500, 1], name='input-img')
rsp_ = tf.placeholder("float", [None,], name='vxl-rsp')
# var for feature pooling field
with tf.name_scope('pooling-field'):
fpf_kernel = tf.random_normal([1, 250, 250, 1], stddev=0.01)
blur = np.array([[1.0/256, 4.0/256, 6.0/256, 4.0/256, 1.0/256],
[4.0/256, 16.0/256, 24.0/256, 16.0/256, 4.0/256],
[6.0/256, 24.0/256, 36.0/256, 24.0/256, 6.0/256],
[4.0/256, 16.0/256, 24.0/256, 16.0/256, 4.0/256],
[1.0/256, 4.0/256, 6.0/256, 4.0/256, 1.0/256]])
blur = np.expand_dims(np.expand_dims(blur, 2), 3)
fpf_kernel = tf.nn.conv2d(fpf_kernel, blur, strides=[1, 1, 1, 1],
padding='SAME')
fpf = tf.Variable(tf.reshape(fpf_kernel, [250, 250]), name='fpf')
flat_fpf = tf.transpose(tf.boolean_mask(tf.reshape(tf.nn.relu(fpf),
(62500, 1)),
img_mask), [1, 0])
# gabor features extraction
with tf.name_scope('feature-extract'):
feat_vtr = []
# batch normalization vars
#bscale = []
#bbeta = []
#for i in range(9):
# bscale.append(tf.Variable(1.0))
# bbeta.append(tf.Variable(1.0))
for i in range(9):
# config for the gabor filters
gabor_real = np.expand_dims(gabor_bank['f%s_real'%(i+1)], 2)
gabor_imag = np.expand_dims(gabor_bank['f%s_imag'%(i+1)], 2)
rconv = tf.nn.conv2d(img, gabor_real, strides=[1, 2, 2, 1],
padding='SAME')
iconv = tf.nn.conv2d(img, gabor_imag, strides=[1, 2, 2, 1],
padding='SAME')
gabor_energy = tf.sqrt(tf.square(rconv) + tf.square(iconv))
# normalization across orientations
#batch_mean, batch_var = tf.nn.moments(gabor_energy, [3],
# keep_dims=True)
#gabor_energy = (gabor_energy - batch_mean) / tf.sqrt(batch_var + 1e-3)
#gabor_energy = bscale[i]*gabor_energy + bbeta[i]
##gabor_energy = tf.nn.batch_normalization(gabor_energy,
## batch_mean,
## batch_var,
## bbeta[i],
## bscale[i],
## 1e-3)
gabor_energy = tf.nn.local_response_normalization(gabor_energy, depth_radius=7, bias=1, alpha=1, beta=0.5)
gabor_energy = tf.transpose(gabor_energy, perm=[1, 2, 3, 0])
gabor_energy = tf.boolean_mask(tf.reshape(gabor_energy,
[62500, -1]), img_mask)
# get feature summary from pooling field
gabor_feat = tf.reshape(tf.matmul(flat_fpf, gabor_energy),
(8, -1))
feat_vtr.append(gabor_feat)
# concatenate gabor features within fpf
vxl_feats = tf.concat(feat_vtr, 0)
# vars for feature weights
with tf.name_scope('weighted-features'):
b = tf.Variable(tf.constant(0.01, shape=[1]), name='bias')
variable_summaries(b)
w = tf.Variable(tf.constant(0.01, shape=[1, 72]), name='weights')
variable_summaries(w)
vxl_wt_feats = tf.matmul(w, vxl_feats)
rsp = tf.reshape(vxl_wt_feats + b, [-1])
        # loss definition
with tf.name_scope('loss'):
# calculate fitting error
error = tf.reduce_mean(tf.square(rsp - rsp_))
# laplacian regularization
laplacian_kernel = np.array([[0, -1, 0],
[-1, 4, -1],
[0, -1, 0]])
laplacian_kernel = np.expand_dims(laplacian_kernel, 2)
laplacian_kernel = np.expand_dims(laplacian_kernel, 3)
fpf_shadow = tf.expand_dims(tf.expand_dims(fpf, 0), 3)
laplacian_error = tf.reduce_sum(tf.square(tf.nn.conv2d(fpf_shadow,
laplacian_kernel,
strides=[1, 1, 1, 1],
padding='VALID')))
# get total error
total_error = 10*error + 0.1*laplacian_error
tf.summary.scalar('fitting-loss', error)
tf.summary.scalar('total-loss', total_error)
# for model saving
saver = tf.train.Saver()
with tf.Session(graph=graph) as sess:
vars_x = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
solver = tf.train.AdamOptimizer(0.0003).minimize(total_error,
var_list = vars_x)
# merge summaries
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(os.path.join(vxl_dir, 'train'),
sess.graph)
#test_writer = tf.summary.FileWriter('./test')
sess.run(tf.global_variables_initializer())
# data splitting
input_imgs = input_imgs - np.expand_dims(img_m, 2)
sample_num = input_imgs.shape[2]
train_imgs = input_imgs[..., :int(sample_num*0.9)]
val_imgs = input_imgs[..., int(sample_num*0.9):]
val_imgs = np.transpose(val_imgs, (2, 0, 1))
val_imgs = np.expand_dims(val_imgs, 3)
train_rsp = vxl_rsp[:int(sample_num*0.9)]
val_rsp = vxl_rsp[int(sample_num*0.9):]
#print train_imgs.shape
#print val_imgs.shape
#print train_rsp.shape
#print val_rsp.shape
# model training
batch_size = 9
index_in_epoch = 0
epochs_completed = 0
min_err = None
patience_cnt = 0
patience = 6
iter_num = 0
val_loss = []
while 1:
start = index_in_epoch
if epochs_completed==0 and start==0:
perm0 = np.arange(train_imgs.shape[2])
np.random.shuffle(perm0)
shuffle_imgs = train_imgs[..., perm0]
shuffle_rsp = train_rsp[perm0]
# go to next epoch
if start + batch_size > train_imgs.shape[2]:
# finish epoch
epochs_completed += 1
# get the rest examples in this epoch
rest_num_examples = int(train_imgs.shape[2]) - start
img_rest_part = shuffle_imgs[..., start:train_imgs.shape[2]]
rsp_rest_part = shuffle_rsp[start:train_imgs.shape[2]]
# shuffle the data
perm = np.arange(train_imgs.shape[2])
np.random.shuffle(perm)
shuffle_imgs = train_imgs[..., perm]
shuffle_rsp = train_rsp[perm]
# start next epoch
start = 0
index_in_epoch = batch_size - rest_num_examples
end = index_in_epoch
img_new_part = shuffle_imgs[..., start:end]
rsp_new_part = shuffle_rsp[start:end]
img_batch = np.concatenate((img_rest_part,img_new_part), axis=2)
img_batch = np.transpose(img_batch, (2, 0, 1))
img_batch = np.expand_dims(img_batch, 3)
batch = [img_batch,
np.concatenate((rsp_rest_part, rsp_new_part), axis=0)]
else:
index_in_epoch += batch_size
end = index_in_epoch
img_batch = shuffle_imgs[..., start:end]
img_batch = np.transpose(img_batch, (2, 0, 1))
img_batch = np.expand_dims(img_batch, 3)
batch = [img_batch, shuffle_rsp[start:end]]
_, summary, step_error, step_fpf = sess.run(
[solver, merged, total_error, fpf],
feed_dict={img: batch[0], rsp_: batch[1]})
train_writer.add_summary(summary, iter_num)
if (iter_num+1)%175==0:
print 'Ep %s'%((iter_num+1)/175)
print 'Training Error: %s'%(step_error)
rsp_err = sess.run(error, feed_dict={img: batch[0],
rsp_: batch[1]})
#l2_err = sess.run(l2_error, feed_dict={img:batch[0],
# rsp_: batch[1]})
lap_err = sess.run(laplacian_error, feed_dict={img:batch[0],
rsp_: batch[1]})
#l1_err = sess.run(l1_error, feed_dict={img:batch[0],
# rsp_: batch[1]})
print 'Rsp error: %s'%(rsp_err)
#print 'L2 error: %s'%(l2_err)
print 'Laplacian error: %s'%(lap_err)
#print 'L1 error: %s'%(l1_err)
# model validation
pred_val_rsp = np.zeros(175)
for j in range(35):
part_rsp = sess.run(rsp,
feed_dict={img: val_imgs[(j*5):(j*5+5)],
rsp_: val_rsp[(j*5):(j*5+5)]})
pred_val_rsp[(j*5):(j*5+5)] = part_rsp
val_err = np.mean(np.square(pred_val_rsp - val_rsp))
print 'Validation Error: %s'%(val_err)
val_loss.append(val_err)
#val_corr = np.corrcoef(pred_val_rsp, val_rsp)[0, 1]
#print 'Validation Corr: %s'%(val_corr)
if iter_num==174:
min_err = val_err
else:
if (min_err - val_err) >= 0.0025:
min_err = val_err
patience_cnt = 0
else:
patience_cnt += 1
# stop signal
if patience_cnt > patience:
print 'Early stopping - step %s'%(iter_num)
# plot fpf
fig, ax = plt.subplots()
cax = ax.imshow(step_fpf, cmap='gray')
fig.colorbar(cax)
plt.savefig(os.path.join(vxl_dir,
'fpf_epoch%s.png'%((iter_num+1)/175)))
plt.close(fig)
# save model
saver.save(sess, os.path.join(vxl_dir, 'prf_model'),
global_step=(iter_num - (patience+1)*175))
saver.save(sess, os.path.join(vxl_dir, 'prf_model'),
global_step=iter_num, write_meta_graph=False)
# save final validation loss
with open(os.path.join(vxl_dir, 'val_loss.txt'), 'w+') as f:
val_idx = -1 * patience - 2
f.write('%s\n'%(val_loss[val_idx]))
break
iter_num += 1
train_writer.close()
#test_writer.close()
return
def tfprf_laplacian_refine(input_imgs, vxl_rsp, gabor_bank, vxl_dir):
"""laplacian regularized pRF model."""
# get image mask
img_m = np.mean(input_imgs, axis=2)
img_mask = imresize(img_m, (250, 250))
# resized image value range: 0-255
img_mask = np.reshape(img_mask<170, [-1])
graph = tf.Graph()
with graph.as_default():
# vars for input data
with tf.name_scope('input'):
img = tf.placeholder("float", [None, 500, 500, 1], name='input-img')
rsp_ = tf.placeholder("float", [None,], name='vxl-rsp')
# var for feature pooling field
with tf.name_scope('pooling-field'):
fpf_kernel = tf.random_normal([1, 250, 250, 1], stddev=0.01)
fpf = tf.Variable(tf.reshape(fpf_kernel, [250, 250]), name='fpf')
flat_fpf = tf.transpose(tf.boolean_mask(tf.reshape(tf.nn.relu(fpf),
(62500, 1)),
img_mask), [1, 0])
# gabor features extraction
with tf.name_scope('feature-extract'):
feat_vtr = []
for i in range(9):
# config for the gabor filters
gabor_real = np.expand_dims(gabor_bank['f%s_real'%(i+1)], 2)
gabor_imag = np.expand_dims(gabor_bank['f%s_imag'%(i+1)], 2)
rconv = tf.nn.conv2d(img, gabor_real, strides=[1, 2, 2, 1],
padding='SAME')
iconv = tf.nn.conv2d(img, gabor_imag, strides=[1, 2, 2, 1],
padding='SAME')
gabor_energy = tf.sqrt(tf.square(rconv) + tf.square(iconv))
gabor_energy = tf.transpose(gabor_energy, perm=[1, 2, 3, 0])
gabor_energy = tf.boolean_mask(tf.reshape(gabor_energy,
[62500, -1]), img_mask)
# get feature summary from pooling field
gabor_feat = tf.reshape(tf.matmul(flat_fpf, gabor_energy),
(8, -1))
feat_vtr.append(gabor_feat)
# concatenate gabor features within fpf
vxl_feats = tf.concat(feat_vtr, 0)
# vars for feature weights
with tf.name_scope('weighted-features'):
b = tf.Variable(tf.constant(0.01, shape=[1]), name='bias')
variable_summaries(b)
w = tf.Variable(tf.constant(0.01, shape=[1, 72]), name='weights')
variable_summaries(w)
vxl_wt_feats = tf.matmul(w, vxl_feats)
rsp = tf.reshape(vxl_wt_feats + b, [-1])
        # loss definition
with tf.name_scope('loss'):
# calculate fitting error
error = tf.reduce_mean(tf.square(rsp - rsp_))
# laplacian regularization
laplacian_kernel = np.array([[0, -1, 0],
[-1, 4, -1],
[0, -1, 0]])
laplacian_kernel = np.expand_dims(laplacian_kernel, 2)
laplacian_kernel = np.expand_dims(laplacian_kernel, 3)
fpf_shadow = tf.expand_dims(tf.expand_dims(fpf, 0), 3)
laplacian_error = tf.reduce_sum(tf.square(tf.nn.conv2d(fpf_shadow,
laplacian_kernel,
strides=[1, 1, 1, 1],
padding='VALID')))
# get total error
total_error = 10*error + 0.1*laplacian_error
tf.summary.scalar('fitting-loss', error)
tf.summary.scalar('total-loss', total_error)
# for model saving
saver = tf.train.Saver()
with tf.Session(graph=graph) as sess:
# find the optimal model and restore it
file_list = os.listdir(vxl_dir)
file_list = [item for item in file_list if item[-5:]=='index']
iter_num = [int(item.split('.')[0].split('-')[1]) for item in file_list]
sel_iter_num = min(iter_num)
model_path = os.path.join(vxl_dir, 'prf_model-%s'%(sel_iter_num))
saver = tf.train.Saver()
saver.restore(sess, model_path)
# create refine dir
refine_dir = os.path.join(vxl_dir, 'refine')
if not os.path.exists(refine_dir):
os.makedirs(refine_dir, 0755)
# refine model
vars_x = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
adam_opt = tf.train.AdamOptimizer(0.0003)
solver = adam_opt.minimize(total_error, var_list = vars_x)
# merge summaries
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(os.path.join(refine_dir, 'train'),
sess.graph)
#test_writer = tf.summary.FileWriter('./test')
adam_vars = [adam_opt.get_slot(var, name)
for name in adam_opt.get_slot_names()
for var in vars_x if var is not None]
#print adam_vars
adam_vars.extend(list(adam_opt._get_beta_accumulators()))
sess.run(tf.variables_initializer(adam_vars))
# data splitting
input_imgs = input_imgs - np.expand_dims(img_m, 2)
sample_num = input_imgs.shape[2]
train_imgs = input_imgs[..., :int(sample_num*0.9)]
val_imgs = input_imgs[..., int(sample_num*0.9):]
val_imgs = np.transpose(val_imgs, (2, 0, 1))
val_imgs = np.expand_dims(val_imgs, 3)
train_rsp = vxl_rsp[:int(sample_num*0.9)]
val_rsp = vxl_rsp[int(sample_num*0.9):]
# model training
batch_size = 9
index_in_epoch = 0
epochs_completed = 0
min_err = None
patience_cnt = 0
if sel_iter_num>174:
patience = 6
else:
patience = 0
iter_num = 0
val_loss = []
while 1:
start = index_in_epoch
if epochs_completed==0 and start==0:
perm0 = np.arange(train_imgs.shape[2])
np.random.shuffle(perm0)
shuffle_imgs = train_imgs[..., perm0]
shuffle_rsp = train_rsp[perm0]
# go to next epoch
if start + batch_size > train_imgs.shape[2]:
# finish epoch
epochs_completed += 1
# get the rest examples in this epoch
rest_num_examples = int(train_imgs.shape[2]) - start
img_rest_part = shuffle_imgs[..., start:train_imgs.shape[2]]
rsp_rest_part = shuffle_rsp[start:train_imgs.shape[2]]
# shuffle the data
perm = np.arange(train_imgs.shape[2])
np.random.shuffle(perm)
shuffle_imgs = train_imgs[..., perm]
shuffle_rsp = train_rsp[perm]
# start next epoch
start = 0
index_in_epoch = batch_size - rest_num_examples
end = index_in_epoch
img_new_part = shuffle_imgs[..., start:end]
rsp_new_part = shuffle_rsp[start:end]
img_batch = np.concatenate((img_rest_part,img_new_part), axis=2)
img_batch = np.transpose(img_batch, (2, 0, 1))
img_batch = np.expand_dims(img_batch, 3)
batch = [img_batch,
np.concatenate((rsp_rest_part, rsp_new_part), axis=0)]
else:
index_in_epoch += batch_size
end = index_in_epoch
img_batch = shuffle_imgs[..., start:end]
img_batch = np.transpose(img_batch, (2, 0, 1))
img_batch = np.expand_dims(img_batch, 3)
batch = [img_batch, shuffle_rsp[start:end]]
_, summary, step_error, step_fpf = sess.run(
[solver, merged, total_error, fpf],
feed_dict={img: batch[0], rsp_: batch[1]})
train_writer.add_summary(summary, iter_num)
if (iter_num+1)%175==0:
print 'Ep %s'%((iter_num+1)/175)
print 'Training Error: %s'%(step_error)
rsp_err = sess.run(error, feed_dict={img: batch[0],
rsp_: batch[1]})
lap_err = sess.run(laplacian_error, feed_dict={img:batch[0],
rsp_: batch[1]})
print 'Rsp error: %s'%(rsp_err)
print 'Laplacian error: %s'%(lap_err)
# model validation
pred_val_rsp = np.zeros(175)
for j in range(35):
part_rsp = sess.run(rsp,
feed_dict={img: val_imgs[(j*5):(j*5+5)],
rsp_: val_rsp[(j*5):(j*5+5)]})
pred_val_rsp[(j*5):(j*5+5)] = part_rsp
val_err = np.mean(np.square(pred_val_rsp - val_rsp))
print 'Validation Error: %s'%(val_err)
val_loss.append(val_err)
if iter_num==174:
min_err = val_err
else:
if (min_err - val_err) >= 0.001:
min_err = val_err
patience_cnt = 0
else:
patience_cnt += 1
# stop signal
if patience_cnt > patience:
print 'Early stopping - step %s'%(iter_num)
# plot fpf
fig, ax = plt.subplots()
cax = ax.imshow(step_fpf, cmap='gray')
fig.colorbar(cax)
plt.savefig(os.path.join(refine_dir,
'fpf_epoch%s.png'%((iter_num+1)/175)))
plt.close(fig)
# save model
saver.save(sess, os.path.join(refine_dir, 'prf_model'),
global_step=(iter_num - (patience+1)*175))
saver.save(sess, os.path.join(refine_dir, 'prf_model'),
global_step=iter_num, write_meta_graph=False)
# save final validation loss
with open(os.path.join(refine_dir,'val_loss.txt'),'w+') as f:
val_idx = -1 * patience - 2
f.write('%s\n'%(val_loss[val_idx]))
break
iter_num += 1
train_writer.close()
#test_writer.close()
return
def tfprf_test(train_imgs, val_imgs, vxl_rsp, gabor_bank, vxl_dir):
"""Test laplacian regularized pRF model on test dataset."""
# get image mask
img_m = np.mean(train_imgs, axis=2)
img_mask = imresize(img_m, (250, 250))
# resized image value range: 0-255
img_mask = np.reshape(img_mask<170, [-1])
graph = tf.Graph()
with graph.as_default():
# vars for input data
with tf.name_scope('input'):
img = tf.placeholder("float", [None, 500, 500, 1], name='input-img')
rsp_ = tf.placeholder("float", [None,], name='vxl-rsp')
# var for feature pooling field
with tf.name_scope('pooling-field'):
fpf_kernel = tf.random_normal([1, 250, 250, 1], stddev=0.01)
blur = np.array([[1.0/256, 4.0/256, 6.0/256, 4.0/256, 1.0/256],
[4.0/256, 16.0/256, 24.0/256, 16.0/256, 4.0/256],
[6.0/256, 24.0/256, 36.0/256, 24.0/256, 6.0/256],
[4.0/256, 16.0/256, 24.0/256, 16.0/256, 4.0/256],
[1.0/256, 4.0/256, 6.0/256, 4.0/256, 1.0/256]])
blur = np.expand_dims(np.expand_dims(blur, 2), 3)
fpf_kernel = tf.nn.conv2d(fpf_kernel, blur, strides=[1, 1, 1, 1],
padding='SAME')
#fpf_kernel = tf.nn.conv2d(fpf_kernel, blur, strides=[1, 1, 1, 1],
# padding='SAME')
fpf = tf.Variable(tf.reshape(fpf_kernel, [250, 250]), name='fpf')
flat_fpf = tf.transpose(tf.boolean_mask(tf.reshape(tf.nn.relu(fpf),
(62500, 1)),
img_mask), [1, 0])
# gabor features extraction
with tf.name_scope('feature-extract'):
feat_vtr = []
for i in range(9):
# config for the gabor filters
gabor_real = np.expand_dims(gabor_bank['f%s_real'%(i+1)], 2)
gabor_imag = np.expand_dims(gabor_bank['f%s_imag'%(i+1)], 2)
rconv = tf.nn.conv2d(img, gabor_real, strides=[1, 2, 2, 1],
padding='SAME')
iconv = tf.nn.conv2d(img, gabor_imag, strides=[1, 2, 2, 1],
padding='SAME')
gabor_energy = tf.sqrt(tf.square(rconv) + tf.square(iconv))
gabor_energy = tf.nn.local_response_normalization(gabor_energy,
depth_radius=7, bias=1, alpha=1, beta=0.5)
gabor_energy = tf.transpose(gabor_energy, perm=[1, 2, 3, 0])
gabor_energy = tf.boolean_mask(tf.reshape(gabor_energy,
[62500, -1]), img_mask)
# get feature summary from pooling field
gabor_feat = tf.reshape(tf.matmul(flat_fpf, gabor_energy),
(8, -1))
feat_vtr.append(gabor_feat)
# concatenate gabor features within fpf
vxl_feats = tf.concat(feat_vtr, 0)
# vars for feature weights
with tf.name_scope('weighted-features'):
b = tf.Variable(tf.constant(0.01, shape=[1]), name='bias')
w = tf.Variable(tf.constant(0.01, shape=[1, 72]), name='weights')
vxl_wt_feats = tf.matmul(w, vxl_feats)
rsp = tf.reshape(vxl_wt_feats + b, [-1])
        # loss definition
with tf.name_scope('loss'):
# calculate fitting error
error = tf.reduce_mean(tf.square(rsp - rsp_))
# parameter regularization
# laplacian regularization
laplacian_kernel = np.array([[0, -1, 0],
[-1, 4, -1],
[0, -1, 0]])
laplacian_kernel = np.expand_dims(laplacian_kernel, 2)
laplacian_kernel = np.expand_dims(laplacian_kernel, 3)
fpf_shadow = tf.expand_dims(tf.expand_dims(fpf, 0), 3)
laplacian_error = tf.reduce_sum(tf.square(tf.nn.conv2d(fpf_shadow,
laplacian_kernel,
strides=[1, 1, 1, 1],
padding='VALID')))
# get total error
total_error = 10*error + laplacian_error
with tf.Session(graph=graph) as sess:
# find the optimal model
file_list = os.listdir(vxl_dir)
file_list = [item for item in file_list if item[-5:]=='index']
iter_num = [int(item.split('.')[0].split('-')[1]) for item in file_list]
sel_iter_num = min(iter_num)
model_path = os.path.join(vxl_dir, 'prf_model-%s'%(sel_iter_num))
# load saved model
saver = tf.train.Saver()
saver.restore(sess, model_path)
# test on validation dataset
input_imgs = val_imgs - np.expand_dims(img_m, 2)
input_imgs = np.transpose(input_imgs, (2, 0, 1))
input_imgs = np.expand_dims(input_imgs, 3)
pred_val_rsp = np.zeros(120)
for i in range(24):
part_rsp = sess.run(rsp, feed_dict={img: input_imgs[(i*5):(i*5+5)],
rsp_: vxl_rsp[(i*5):(i*5+5)]})
pred_val_rsp[(i*5):(i*5+5)] = part_rsp
val_err = np.mean(np.square(pred_val_rsp - vxl_rsp))
print 'Validation Error: %s'%(val_err)
# save final validation loss
with open(os.path.join(vxl_dir, 'test_loss.txt'), 'w+') as f:
f.write('%s\n'%(val_err))
return
def get_gabor_features(input_imgs, gabor_bank):
"""Get Gabor features from images"""
# vars for input data
img = tf.placeholder("float", [None, 500, 500, 1])
# gabor features extraction
feat_vtr = []
for i in range(9):
# config for the gabor filters
gabor_real = np.expand_dims(gabor_bank['f%s_real'%(i+1)], 2)
gabor_imag = np.expand_dims(gabor_bank['f%s_imag'%(i+1)], 2)
real_conv = tf.nn.conv2d(img, gabor_real, strides=[1, 2, 2, 1],
padding='SAME')
imag_conv = tf.nn.conv2d(img, gabor_imag, strides=[1, 2, 2, 1],
padding='SAME')
gabor_energy = tf.sqrt(tf.square(real_conv) + tf.square(imag_conv))
feat_vtr.append(gabor_energy)
# concatenate gabor features from various channels
gabor_feat = tf.concat(feat_vtr, 3)
# graph config
config = tf.ConfigProto()
with tf.Session(config=config) as sess:
sess.run(tf.initialize_all_variables())
gabor_file = 'train_gabor_feat.memdat'
fp = np.memmap(gabor_file, dtype='float32', mode='w+',
shape=(input_imgs.shape[2], 250, 250, 72))
for i in range(input_imgs.shape[2]/10):
x = input_imgs[..., (i*10):(i*10+10)]
x = np.transpose(x, (2, 0, 1))
x = np.expand_dims(x, 3)
gf = sess.run(gabor_feat, feed_dict={img: x})
fp[(i*10):(i*10+10)] = gf
def get_prf_weights(vxl_dir):
"""Get feature weights and fpf from trained model."""
graph = tf.Graph()
with tf.Session(graph=graph) as sess:
# find the optimal model and restore it
file_list = os.listdir(vxl_dir)
file_list = [item for item in file_list if item[-5:]=='index']
iter_num = [int(item.split('.')[0].split('-')[1]) for item in file_list]
sel_iter_num = min(iter_num)
model_path = os.path.join(vxl_dir, 'prf_model-%s'%(sel_iter_num))
saver = tf.train.import_meta_graph(model_path+'.meta')
saver.restore(sess, model_path)
# get fpf
fpf = graph.get_tensor_by_name('pooling-field/fpf:0').eval()
# get feature weights
wts = graph.get_tensor_by_name('weighted-features/weights:0').eval()
# get feature biases
b = graph.get_tensor_by_name('weighted-features/bias:0').eval()
return fpf, b, wts
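# Usage sketch (added for illustration; the voxel directory below is hypothetical):
#   fpf, bias, wts = get_prf_weights('vim1_S1/prf/v1/bn/voxel_93')
#   # fpf has shape (250, 250), wts has shape (1, 72), bias has shape (1,)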
def prf_reconstructor(img_mask, gabor_bank, wts, bias, fpfs, vxl_rsp):
"""Image reconstructor based on Activation Maximization."""
# vars for image
img = tf.Variable(tf.random_normal([1, 500, 500, 1], stddev=0.001),
name='input-img')
# full-size gabor bank
gabor_real = np.expand_dims(gabor_bank['gabor_real'], 2)
gabor_imag = np.expand_dims(gabor_bank['gabor_imag'], 2)
real_conv = tf.nn.conv2d(img, gabor_real, strides=[1, 2, 2, 1],
padding='SAME')
imag_conv = tf.nn.conv2d(img, gabor_imag, strides=[1, 2, 2, 1],
padding='SAME')
# gabor energy shape: 1, 250, 250, 72
gabor_energy = tf.sqrt(tf.square(real_conv) + tf.square(imag_conv))
# get feature summary from pooling field
#gabor_energy = tf.transpose(gabor_energy, perm=[3, 1, 2, 0])
# gabor energy shape: mask# * 72
gabor_energy = tf.boolean_mask(tf.reshape(gabor_energy, [62500, -1]),
img_mask)
# get valid space from fpfs
# flat_fpfps shape: 250, 250, voxel#
flat_fpfs = tf.transpose(fpfs, perm=[1, 2, 0])
# flat_fpfs shape: mask# * voxel#
flat_fpfs = tf.boolean_mask(tf.reshape(flat_fpfs, [62500, -1]), img_mask)
flat_fpfs = tf.transpose(flat_fpfs, perm=[1, 0])
# feat_vtr shape: voxel# * 72
feat_vtr = tf.matmul(tf.nn.relu(flat_fpfs), gabor_energy)
# get estimate neural activity
pred_rsp = tf.reduce_sum(tf.multiply(feat_vtr, wts), 1) + bias
# loss config
real_rsp = tf.placeholder(tf.float32, shape=(vxl_rsp.shape[0],))
error = tf.reduce_mean(tf.square(real_rsp - pred_rsp))
opt = tf.train.GradientDescentOptimizer(0.5)
vars_x = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'input-img')
solver = opt.minimize(error, var_list=vars_x)
# model solving
sess = tf.Session()
sess.run(tf.global_variables_initializer())
print 'Start ...'
for step in range(50):
print 'Step %s'%(step)
_, current_err, rec_img = sess.run([solver, error, img],
feed_dict={real_rsp: vxl_rsp})
if step%10==0:
print('Iter: {}; loss: {:.4}'.format(step, current_err))
fig=plt.figure()
plt.imshow(rec_img.reshape(500, 500))
plt.savefig('recons'+str(step)+'.png')
plt.close(fig)
return rec_img
if __name__ == '__main__':
"""Main function"""
# database directory config
# config parser
cf = configParser.Config('config')
# database directory config
db_dir = os.path.join(cf.get('database', 'path'), 'vim1')
# directory config for analysis
root_dir = cf.get('base', 'path')
feat_dir = os.path.join(root_dir, 'sfeatures', 'vim1')
res_dir = os.path.join(root_dir, 'subjects')
#-- general config
subj_id = 1
roi = 'v1'
# directory config
subj_dir = os.path.join(res_dir, 'vim1_S%s'%(subj_id))
prf_dir = os.path.join(subj_dir, 'prf')
roi_dir = os.path.join(prf_dir, roi, 'bn')
if not os.path.exists(roi_dir):
os.makedirs(roi_dir, 0755)
#-- parameter preparation
gabor_bank_file = os.path.join(db_dir, 'gabor_kernels_small.npz')
gabor_bank = np.load(gabor_bank_file)
#-- load vim1 stimuli
train_stimuli_file = os.path.join(db_dir, 'train_stimuli.npy')
train_imgs = np.load(train_stimuli_file)
val_stimuli_file = os.path.join(db_dir, 'val_stimuli.npy')
val_imgs = np.load(val_stimuli_file)
#-- get gabor features from stimuli
#get_gabor_features(train_imgs, gabor_bank)
#-- pRF model test bench
vxl_idx, train_ts, val_ts = dataio.load_vim1_fmri(db_dir, subj_id, roi=roi)
ts_m = np.mean(train_ts, axis=1, keepdims=True)
ts_s = np.std(train_ts, axis=1, keepdims=True)
train_ts = (train_ts - ts_m) / (ts_s + 1e-5)
ts_m = np.mean(val_ts, axis=1, keepdims=True)
ts_s = np.std(val_ts, axis=1, keepdims=True)
val_ts = (val_ts - ts_m) / (ts_s + 1e-5)
    ## to test the model, select the following voxels
#sel_vxl_idx = [93, 257, 262, 385, 409, 485, 511, 517, 518, 603, 614,
# 807, 819, 820, 822, 826, 871, 929, 953, 1211]
#for i in sel_vxl_idx[:5]:
for i in range(0, 10):
print 'Voxel %s - %s'%(i, vxl_idx[i])
vxl_dir = os.path.join(roi_dir, 'voxel_%s'%(vxl_idx[i]))
os.makedirs(vxl_dir, 0755)
# load voxel fmri data
vxl_rsp = train_ts[i]
tfprf_laplacian_bn(train_imgs, vxl_rsp, gabor_bank, vxl_dir)
#tfprf_laplacian(train_imgs, vxl_rsp, gabor_bank, vxl_dir)
#refine_dir = os.path.join(vxl_dir, 'refine')
#if os.path.exists(refine_dir):
# os.system('rm -rf %s'%(refine_dir))
#tfprf_laplacian_refine(train_imgs, vxl_rsp, gabor_bank, vxl_dir)
vxl_rsp = val_ts[i]
tfprf_test(train_imgs, val_imgs, vxl_rsp, gabor_bank, vxl_dir)
#-- get validation r^2
#vxl_idx, train_ts, val_ts = dataio.load_vim1_fmri(db_dir, subj_id, roi=roi)
#ts_m = np.mean(train_ts, axis=1, keepdims=True)
#ts_s = np.std(train_ts, axis=1, keepdims=True)
#train_ts = (train_ts - ts_m) / (ts_s + 1e-5)
#val_r2 = np.zeros(vxl_idx.shape[0])
#for i in range(vxl_idx.shape[0]):
# print 'Voxel %s - %s'%(i, vxl_idx[i])
# vxl_dir = os.path.join(roi_dir, 'voxel_%s'%(vxl_idx[i]), 'refine')
# val_mse = open(os.path.join(vxl_dir, 'val_loss.txt'), 'r').readlines()
# val_mse = float(val_mse[0].strip())
# # calculate r^2
# val_rsp = train_ts[i, 1575:]
# ss_tol = np.var(val_rsp)
# print 'Total variance: %s'%(ss_tol)
# print 'MSE: %s'%(val_mse)
# r2 = 1.0 - val_mse * 1.0 / ss_tol
# val_r2[i] = r2
#outfile = os.path.join(roi_dir, 'dl_prf_refine_val_r2.npy')
#np.save(outfile, val_r2)
#-- get r^2 on test dataset
#vxl_idx, train_ts, val_ts = dataio.load_vim1_fmri(db_dir, subj_id, roi=roi)
#test_r2 = np.zeros(vxl_idx.shape[0])
#for i in range(vxl_idx.shape[0]):
# print 'Voxel %s - %s'%(i, vxl_idx[i])
# vxl_dir = os.path.join(roi_dir, 'voxel_%s'%(vxl_idx[i]), 'refine')
# test_mse = open(os.path.join(vxl_dir,'test_loss.txt'), 'r').readlines()
# test_mse = float(test_mse[0].strip())
# # calculate r^2
# print 'MSE: %s'%(test_mse)
# r2 = 1.0 - test_mse*1.0
# test_r2[i] = r2
#outfile = os.path.join(roi_dir, 'dl_prf_refine_test_r2.npy')
#np.save(outfile, test_r2)
#-- get learned weights from voxel-specific model
#vxl_idx, train_ts, val_ts = dataio.load_vim1_fmri(db_dir, subj_id, roi=roi)
#wts = np.zeros((vxl_idx.shape[0], 72))
#fpfs = np.zeros((vxl_idx.shape[0], 250, 250))
#biases = np.zeros((vxl_idx.shape[0],))
#for i in range(vxl_idx.shape[0]):
# print 'Voxel %s - %s'%(i, vxl_idx[i])
# vxl_dir = os.path.join(roi_dir, 'voxel_%s'%(vxl_idx[i]), 'refine')
# fpf, b, wt = get_prf_weights(vxl_dir)
# outfile = os.path.join(vxl_dir, 'model_wts')
# np.savez(outfile, fpf=fpf, wt=wt, bias=b)
# fpfs[i, ...] = fpf
# wts[i] = wt
# biases[i] = b
#model_wts_file = os.path.join(roi_dir, 'merged_model_wts')
#np.savez(model_wts_file, fpfs=fpfs, wts=wts, biases=biases)
#-- visual reconstruction using cnn-prf
#vxl_idx, train_ts, val_ts = dataio.load_vim1_fmri(db_dir, subj_id, roi=roi)
#ts_m = np.mean(val_ts, axis=1, keepdims=True)
#ts_s = np.std(val_ts, axis=1, keepdims=True)
#val_ts = (val_ts - ts_m) / (ts_s + 1e-5)
## load training images
#train_stimuli_file = os.path.join(db_dir, 'train_stimuli.npy')
#train_imgs = np.load(train_stimuli_file)
## get image mask
#img_m = np.mean(train_imgs, axis=2)
#img_mask = imresize(img_m, (250, 250))
##img_mask = img_mask<170
#img_mask = np.reshape(img_mask<170, [-1])
# load estimated prf parameters
#model_wts = np.load(os.path.join(roi_dir, 'merged_model_wts.npz'))
## load model test result
#dl_test_r2 = np.load(os.path.join(roi_dir, 'dl_prf_refine_test_r2.npy'))
## select voxels
#thres = 0.1
#sel_idx = np.nonzero(dl_test_r2>=thres)[0]
#print 'Select %s voxels for image reconstruction'%(sel_idx.shape[0])
#sel_wts = model_wts['wts'][sel_idx].astype(np.float32)
#sel_fpfs = model_wts['fpfs'][sel_idx].astype(np.float32)
##sel_fpfs = sel_fpfs * img_mask
#sel_bias = model_wts['biases'][sel_idx].astype(np.float32)
### generate selcted voxels' fpfs
##fpf_mask = sel_fpfs>0
##fpf_mask = np.mean(fpf_mask, axis=0)
##fig=plt.figure()
##plt.imshow(fpf_mask)
##plt.colorbar()
##plt.savefig('fpf_mask.png')
##plt.close(fig)
## get voxel response and reconstruct image
#vxl_rsp = val_ts[sel_idx, 0]
#print 'Voxel response shape: ',
#print vxl_rsp.shape
## load gabor bank
#gabor_bank_file = os.path.join(db_dir, 'gabor_kernels.npz')
#gabor_bank = np.load(gabor_bank_file)
#rec = prf_reconstructor(img_mask, gabor_bank, sel_wts, sel_bias,
# sel_fpfs, vxl_rsp)
## model pre-testing and visual reconstruction
#-- parameter preparation
#gabor_bank_file = os.path.join(feat_dir, 'gabor_kernels.npz')
#gabor_bank = np.load(gabor_bank_file)
#vxl_coding_paras_file =os.path.join(prf_dir,'tfrecon','vxl_coding_wts.npz')
#vxl_coding_paras = np.load(vxl_coding_paras_file)
#-- test encoding model
#print 'Select voxel index',
#print vxl_coding_paras['vxl_idx']
#img_file = os.path.join(root_dir, 'example_imgs.npy')
#imgs = np.load(img_file)
#model_test(imgs, gabor_bank, vxl_coding_paras)
#-- stimuli reconstruction
#resp_file = os.path.join(db_dir, 'EstimatedResponses.mat')
#resp_mat = tables.open_file(resp_file)
## create mask
## train data shape: (1750, ~25000)
#train_ts = resp_mat.get_node('/dataTrnS%s'%(subj_id))[:]
## reshape fmri response: data shape (#voxel, 1750/120)
#train_ts = np.nan_to_num(train_ts.T)
#m = np.mean(train_ts, axis=1, keepdims=True)
#s = np.std(train_ts, axis=1, keepdims=True)
#train_ts = (train_ts - m) / (s + 1e-5)
##val_ts = tf.get_node('/dataValS%s'%(subj_id))[:]
##val_ts = val_ts.T
##val_ts = np.nan_to_num(val_ts[vxl_idx])
#resp_mat.close()
#y_ = train_ts[vxl_coding_paras['vxl_idx'].astype(np.int)]
## shape: (#voxel, 1750)
#print y_.shape
#recon_img = reconstructor(gabor_bank, vxl_coding_paras, y_)
## show image
#fig=plt.figure()
    #plt.imshow(recon_img, cmap='gray')
#plt.savefig('recons.png')
|
bsd-3-clause
|
BorisJeremic/Real-ESSI-Examples
|
analytic_solution/test_cases/Contact/Coupled_Contact/Steady_State_Single_Foundation_Sysytem_Under_Tension/CoupledHardContact/n_0.5/Plot_Current.py
|
12
|
3447
|
#!/usr/bin/env python
import h5py
from matplotlib import pylab
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
from matplotlib.font_manager import FontProperties
import math
import numpy as np
plt.rcParams.update({'font.size': 30})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=28
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=28
# Plot the figure. Add labels and titles.
plt.figure()
ax = plt.subplot(111)
ax.grid()
ax.set_xlabel("Time [s] ")
ax.set_ylabel(r"Stress [Pa] ")
# Pore Pressure
# #########################################################################
thefile = "Soil_Foundation_System_Surface_Load.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
upU_p = finput["/Model/Nodes/Generalized_Displacements"][3,:]
upU_u = finput["/Model/Nodes/Generalized_Displacements"][2,:]
upU_U = finput["/Model/Nodes/Generalized_Displacements"][6,:]
u_u = finput["/Model/Nodes/Generalized_Displacements"][79,:]
sigma_zz_ = finput["/Model/Elements/Gauss_Outputs"][14,:]
# pore_pressure
ax.plot(times,upU_p,'b',linewidth=2,label=r'Pore Pressure $p$');
ax.hold(True);
# Total Stress
# #########################################################################
# Read the time and displacement
times = finput["time"][:];
T = times[len(times)-1]
sigma_zz = 400.0/T*times
# total stress (linear ramp over the loading time)
ax.plot(times,sigma_zz,'k',linewidth=2,label=r'Total Stress $\sigma$');
ax.hold(True);
# Effective Stress
# #########################################################################
# Read the time and displacement
times = finput["time"][:];
sigma_zz_ = sigma_zz - upU_p
# effective stress
ax.plot(times,sigma_zz_,'r',linewidth=2,label=r'''Effective Stress $\sigma^{\prime}$''');
ax.hold(True);
max_yticks = 5
yloc = plt.MaxNLocator(max_yticks)
ax.yaxis.set_major_locator(yloc)
max_xticks = 5
yloc = plt.MaxNLocator(max_xticks)
ax.xaxis.set_major_locator(yloc)
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.35),
ncol=2, fancybox=True, shadow=True, prop={'size': 24})
pylab.savefig("Current_Effective_Stress_Principle_At_Interface.pdf", bbox_inches='tight')
# plt.show()
#
################################### Drainage Condition Verification #############################
ax.hold(False);
fig = plt.figure();
ax = plt.subplot(111)
ax.plot(times,upU_u*1e8,'k',linewidth=3,label=r'$upU\_u$'); ax.hold(True);
ax.plot(times,upU_U*1e8,'b',linewidth=10,label=r'$upU\_U$'); ax.hold(True);
ax.plot(times,u_u*1e8,'r',linewidth=3,label=r'$u\_u$'); ax.hold(True);
ax.grid()
ax.set_xlabel("Time [s] ")
ax.set_ylabel(r"Displacement $\times 1e^{-8}$ [m] ")
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.25),
ncol=4, fancybox=True, shadow=True, prop={'size': 24})
max_yticks = 5
yloc = plt.MaxNLocator(max_yticks)
ax.yaxis.set_major_locator(yloc)
max_xticks = 5
yloc = plt.MaxNLocator(max_xticks)
ax.xaxis.set_major_locator(yloc)
pylab.savefig("Current_Displacement_At_Interface.pdf", bbox_inches='tight')
# plt.show()
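# Note (added): the effective stress plotted above follows Terzaghi's principle,
# sigma_effective = sigma_total - pore_pressure, which is exactly how sigma_zz_
# is computed from sigma_zz and upU_p.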
|
cc0-1.0
|
rishikksh20/scikit-learn
|
examples/calibration/plot_calibration.py
|
66
|
4795
|
"""
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you some kind of confidence on the prediction. However, not all
classifiers provide well-calibrated probabilities, some being over-confident
while others are under-confident. Thus, a separate calibration of predicted
probabilities is often desirable as a postprocessing. This example illustrates
two different methods for this calibration and evaluates the quality of the
returned probabilities using Brier's score
(see https://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probability using a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is able
to provide a probability calibration that returns probabilities close to the
expected 0.5 for most of the samples belonging to the middle cluster with
heterogeneous labels. This results in a significantly improved Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <[email protected]>
# Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.model_selection import train_test_split
n_samples = 50000
n_bins = 3 # use 3 bins for calibration_curve as we have 3 clusters here
# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
centers=centers, shuffle=False, random_state=42)
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])
# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)
# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train) # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]
print("Brier scores: (the smaller the better)")
clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)
clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)
clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)
###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
this_X = X_train[y_train == this_y]
this_sw = sw_train[y_train == this_y]
plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")
plt.figure()
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
y_test[order].reshape(25, -1).mean(1),
'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
"(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
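# Optional extra check (added sketch, not part of the original example): the
# n_bins defined above could be used with sklearn.calibration.calibration_curve
# to inspect reliability directly, e.g.
#   from sklearn.calibration import calibration_curve
#   frac_pos, mean_pred = calibration_curve(y_test, prob_pos_isotonic, n_bins=n_bins)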
|
bsd-3-clause
|
DailyActie/Surrogate-Model
|
01-codes/scikit-learn-master/sklearn/linear_model/logistic.py
|
1
|
67834
|
"""
Logistic Regression
"""
# Author: Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Manoj Kumar <[email protected]>
# Lars Buitinck
# Simon Wu <[email protected]>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from .sag import sag_solver
from ..exceptions import DataConversionWarning
from ..exceptions import NotFittedError
from ..externals import six
from ..externals.joblib import Parallel, delayed
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import SCORERS
from ..model_selection import check_cv
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot,
softmax, squared_norm)
from ..utils.extmath import row_norms
from ..utils.fixes import expit
from ..utils.multiclass import check_classification_targets
from ..utils.optimize import newton_cg
from ..utils.validation import check_X_y
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
return w, c, y * z
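# Worked example (added comment; values are illustrative): for
#   w = np.array([1., 2., 0.5]), X = np.array([[1., 1.]]), y = np.array([1.])
# the intercept c is 0.5, z = 1*1 + 2*1 + 0.5 = 3.5, and the returned yz is [3.5].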
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
_, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
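# Self-check sketch (added as a comment; not executed by the module): the
# analytic gradient above can be compared against finite differences, e.g.
#   from scipy.optimize import check_grad
#   rng = np.random.RandomState(0)
#   X = rng.randn(20, 3); y = np.sign(rng.randn(20)); w0 = np.zeros(3)
#   err = check_grad(lambda w: _logistic_loss(w, X, y, alpha=1.0),
#                    lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.0)[1], w0)
#   # err should be small (around 1e-6)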
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the gradient and the Hessian, in the case of a logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return grad, Hs
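# Illustrative sketch (editorial addition, not part of the original module):
# the Hessian-vector product returned by _logistic_grad_hess agrees with a
# finite difference of the gradient along an arbitrary direction. The helper
# name below is hypothetical and never called here.
def _example_check_logistic_hessian():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = np.sign(rng.randn(20))
    w = rng.randn(4)  # includes an intercept term
    alpha = 1.0
    s = rng.randn(4)
    _, Hs = _logistic_grad_hess(w, X, y, alpha)
    eps = 1e-5
    g_plus = _logistic_loss_and_grad(w + eps * s, X, y, alpha)[1]
    g_minus = _logistic_loss_and_grad(w - eps * s, X, y, alpha)[1]
    assert np.allclose(Hs(s), (g_plus - g_minus) / (2 * eps),
                       rtol=1e-4, atol=1e-6)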
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
Reference
---------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
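# Illustrative sketch (editorial addition, not part of the original module):
# at w = 0 the multinomial model predicts uniform class probabilities, so for
# one-hot labels and unit sample weights the loss is n_samples * log(n_classes).
# The helper name below is hypothetical and never called here.
def _example_multinomial_loss_at_zero():
    rng = np.random.RandomState(0)
    n_samples, n_features, n_classes = 12, 3, 4
    X = rng.randn(n_samples, n_features)
    Y = np.eye(n_classes)[rng.randint(n_classes, size=n_samples)]
    w = np.zeros(n_classes * (n_features + 1))  # coefficients and intercepts
    sample_weight = np.ones(n_samples)
    loss, p, _ = _multinomial_loss(w, X, Y, 1.0, sample_weight)
    assert np.allclose(p, 1.0 / n_classes)
    assert np.allclose(loss, n_samples * np.log(n_classes))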
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
Reference
---------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)))
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
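# Illustrative sketch (editorial addition, not part of the original module):
# the loss returned by _multinomial_loss_grad matches _multinomial_loss, and
# because both the softmax probabilities and the one-hot labels sum to one
# over classes, the intercept gradients sum to zero across classes. The
# helper name below is hypothetical and never called here.
def _example_check_multinomial_loss_grad():
    rng = np.random.RandomState(0)
    n_samples, n_features, n_classes = 12, 3, 4
    X = rng.randn(n_samples, n_features)
    Y = np.eye(n_classes)[rng.randint(n_classes, size=n_samples)]
    w = rng.randn(n_classes * (n_features + 1))
    sw = np.ones(n_samples)
    loss, grad, _ = _multinomial_loss_grad(w, X, Y, 1.0, sw)
    assert np.allclose(loss, _multinomial_loss(w, X, Y, 1.0, sw)[0])
    grad = grad.reshape(n_classes, n_features + 1)
    assert np.allclose(grad[:, -1].sum(), 0.0, atol=1e-10)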
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return grad, hessp
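# Illustrative sketch (editorial addition, not part of the original module):
# the Hessian-vector product returned by _multinomial_grad_hess is linear in
# its argument and preserves the ravelled parameter shape. The helper name
# below is hypothetical and never called here.
def _example_check_multinomial_hessp():
    rng = np.random.RandomState(0)
    n_samples, n_features, n_classes = 12, 3, 4
    X = rng.randn(n_samples, n_features)
    Y = np.eye(n_classes)[rng.randint(n_classes, size=n_samples)]
    w = rng.randn(n_classes * (n_features + 1))
    sw = np.ones(n_samples)
    grad, hessp = _multinomial_grad_hess(w, X, Y, 1.0, sw)
    v1, v2 = rng.randn(w.size), rng.randn(w.size)
    assert hessp(v1).shape == grad.shape
    assert np.allclose(hessp(v1 + v2), hessp(v1) + hessp(v2), atol=1e-10)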
def _check_solver_option(solver, multi_class, penalty, dual):
if solver not in ['liblinear', 'newton-cg', 'lbfgs', 'sag']:
raise ValueError("Logistic Regression supports only liblinear,"
" newton-cg, lbfgs and sag solvers, got %s" % solver)
if multi_class not in ['multinomial', 'ovr']:
raise ValueError("multi_class should be either multinomial or "
"ovr, got %s" % multi_class)
if multi_class == 'multinomial' and solver == 'liblinear':
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
if solver != 'liblinear':
if penalty != 'l2':
raise ValueError("Solver %s supports only l2 penalties, "
"got %s penalty." % (solver, penalty))
if dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None, copy=False,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='ovr',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
copy : bool, default False
Whether or not to produce a copy of the data. A copy is not required
anymore. This parameter is deprecated and will be removed in 0.19.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used only in solvers 'sag' and 'liblinear'.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array, shape (n_cs,)
Actual number of iteration for each Cs.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
"""
if copy:
warnings.warn("A copy is not required anymore. The 'copy' parameter "
"is deprecated and will be removed in 0.19.",
DeprecationWarning)
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
_check_solver_option(solver, multi_class, penalty, dual)
# Preprocessing.
if check_input or copy:
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, copy=copy, dtype=None)
check_consistent_length(X, y)
_, n_features = X.shape
classes = np.unique(y)
random_state = check_random_state(random_state)
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If sample weights exist, convert them to array (support for lists)
# and check length
# Otherwise set them to 1 for all examples
if sample_weight is not None:
sample_weight = np.array(sample_weight, dtype=np.float64, order='C')
check_consistent_length(y, sample_weight)
else:
sample_weight = np.ones(X.shape[0])
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "balanced", then
# the class_weights are assigned after masking the labels with a OvR.
le = LabelEncoder()
if isinstance(class_weight, dict) or multi_class == 'multinomial':
if solver == "liblinear":
if classes.size == 2:
# Reconstruct the weights with keys 1 and -1
temp = {1: class_weight[pos_class],
-1: class_weight[classes[0]]}
class_weight = temp.copy()
else:
raise ValueError("In LogisticRegressionCV the liblinear "
"solver cannot handle multiclass with "
"class_weight of type dict. Use the lbfgs, "
"newton-cg or sag solvers or set "
"class_weight='balanced'")
else:
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight *= class_weight_[le.fit_transform(y)]
# For doing a ovr, we need to mask the labels first. for the
# multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept))
mask_classes = np.array([-1, 1])
mask = (y == pos_class)
y_bin = np.ones(y.shape, dtype=np.float64)
y_bin[~mask] = -1.
# for compute_class_weight
# 'auto' is deprecated and will be removed in 0.19
if class_weight in ("auto", "balanced"):
class_weight_ = compute_class_weight(class_weight, mask_classes,
y_bin)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
else:
if solver != 'sag':
lbin = LabelBinarizer()
Y_multi = lbin.fit_transform(y)
if Y_multi.shape[1] == 1:
Y_multi = np.hstack([1 - Y_multi, Y_multi])
else:
# SAG multinomial solver needs LabelEncoder, not LabelBinarizer
le = LabelEncoder()
Y_multi = le.fit_transform(y)
w0 = np.zeros((classes.size, n_features + int(fit_intercept)),
order='F')
if coef is not None:
# it must work both with and without the bias term
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_classes = classes.size
if n_classes == 2:
n_classes = 1
if (coef.shape[0] != n_classes or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
# fmin_l_bfgs_b and newton-cg accepts only ravelled parameters.
if solver in ['lbfgs', 'newton-cg']:
w0 = w0.ravel()
target = Y_multi
if solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
warm_start_sag = {'coef': w0.T}
else:
target = y_bin
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
warm_start_sag = {'coef': np.expand_dims(w0, axis=1)}
coefs = list()
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
if solver == 'lbfgs':
try:
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
except TypeError:
# old scipy doesn't have maxiter
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol)
if info["warnflag"] == 1 and verbose > 0:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.")
try:
n_iter_i = info['nit'] - 1
except:
n_iter_i = info['funcalls'] - 1
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0, n_iter_i = newton_cg(hess, func, grad, w0, args=args,
maxiter=max_iter, tol=tol)
elif solver == 'liblinear':
coef_, intercept_, n_iter_i, = _fit_liblinear(
X, target, C, fit_intercept, intercept_scaling, class_weight,
penalty, dual, verbose, max_iter, tol, random_state,
sample_weight=sample_weight)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
elif solver == 'sag':
if multi_class == 'multinomial':
target = target.astype(np.float64)
loss = 'multinomial'
else:
loss = 'log'
w0, n_iter_i, warm_start_sag = sag_solver(
X, target, sample_weight, loss, 1. / C, max_iter, tol,
verbose, random_state, False, max_squared_sum, warm_start_sag)
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg', 'sag'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
multi_w0 = np.reshape(w0, (classes.size, -1))
if classes.size == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0)
else:
coefs.append(w0.copy())
n_iter[i] = n_iter_i
return coefs, np.array(Cs), n_iter
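# Illustrative sketch (editorial addition, not part of the original module):
# fitting a short regularization path on synthetic binary data and checking
# the returned shapes. The helper name below is hypothetical and never
# called here.
def _example_logistic_regression_path():
    rng = np.random.RandomState(0)
    X = rng.randn(30, 4)
    y = np.hstack([np.zeros(15), np.ones(15)])
    coefs, Cs, n_iter = logistic_regression_path(X, y, Cs=3)
    assert len(coefs) == 3 and Cs.shape == (3,) and n_iter.shape == (3,)
    assert coefs[0].shape == (4 + 1,)  # n_features plus the fitted intercept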
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, intercept_scaling=1.,
multi_class='ovr', random_state=None,
max_squared_sum=None, sample_weight=None):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
regularization strength. If Cs is an int, then a grid of Cs
values is chosen in a logarithmic scale between 1e-4 and 1e4.
If not provided, then a fixed set of values for Cs is used.
scoring : callable
For a list of scoring functions that can be used, look at
:mod:`sklearn.metrics`. The default scoring option used is
accuracy_score.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
Decides which solver to use.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solver.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used only in solvers 'sag' and 'liblinear'.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
n_iter : array, shape(n_cs,)
Actual number of iteration for each Cs.
"""
_check_solver_option(solver, multi_class, penalty, dual)
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
if sample_weight is not None:
sample_weight = sample_weight[train]
coefs, Cs, n_iter = logistic_regression_path(
X_train, y_train, Cs=Cs, fit_intercept=fit_intercept,
solver=solver, max_iter=max_iter, class_weight=class_weight,
pos_class=pos_class, multi_class=multi_class,
tol=tol, verbose=verbose, dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling, random_state=random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
log_reg = LogisticRegression(fit_intercept=fit_intercept)
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError("multi_class should be either multinomial or ovr, "
"got %d" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test = np.ones(y_test.shape, dtype=np.float64)
y_test[~mask] = -1.
# To deal with object dtypes, we need to convert into an array of floats.
y_test = check_array(y_test, dtype=np.float64, ensure_2d=False)
scores = list()
if isinstance(scoring, six.string_types):
scoring = SCORERS[scoring]
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores), n_iter
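# Illustrative sketch (editorial addition, not part of the original module):
# scoring a small path on an explicit train/test split of synthetic binary
# data. The helper name below is hypothetical and never called here.
def _example_log_reg_scoring_path():
    rng = np.random.RandomState(0)
    X = rng.randn(40, 4)
    y = np.hstack([np.zeros(20), np.ones(20)])
    train = np.r_[0:15, 20:35]
    test = np.r_[15:20, 35:40]
    coefs, Cs, scores, n_iter = _log_reg_scoring_path(X, y, train, test,
                                                      pos_class=1, Cs=3)
    assert len(coefs) == 3 and scores.shape == (3,) and n_iter.shape == (3,)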
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr' and uses the cross-
entropy loss, if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs',
'sag' and 'newton-cg' solvers.)
This class implements regularized logistic regression using the
'liblinear' library, 'newton-cg', 'sag' and 'lbfgs' solvers. It can handle
both dense and sparse input. Use C-ordered arrays or CSR matrices
containing 64-bit floats for optimal performance; any other input format
will be converted (and copied).
The 'newton-cg', 'sag', and 'lbfgs' solvers support only L2 regularization
with primal formulation. The 'liblinear' solver supports both L1 and L2
regularization, with a dual formulation only for the L2 penalty.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : str, 'l1' or 'l2', default: 'l2'
Used to specify the norm used in the penalization. The newton-cg, sag
and lbfgs solvers support only l2 penalties.
dual : bool, default: False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, default: 1.0
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
intercept_scaling : float, default: 1
Useful only if solver is liblinear.
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', default: None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
*class_weight='balanced'* instead of deprecated
*class_weight='auto'*.
max_iter : int, default: 100
Useful only for the newton-cg, sag and lbfgs solvers.
Maximum number of iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, default: None
The seed of the pseudo random number generator to use when
shuffling the data. Used only in solvers 'sag' and 'liblinear'.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}, default: 'liblinear'
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' is
faster for large ones.
- For multiclass problems, only 'newton-cg', 'sag' and 'lbfgs' handle
multinomial loss; 'liblinear' is limited to one-versus-rest
schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
Note that 'sag' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float, default: 1e-4
Tolerance for stopping criteria.
multi_class : str, {'ovr', 'multinomial'}, default: 'ovr'
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'newton-cg',
'sag' and 'lbfgs' solver.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
verbose : int, default: 0
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
warm_start : bool, default: False
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Useless for liblinear solver.
.. versionadded:: 0.17
*warm_start* to support *lbfgs*, *newton-cg*, *sag* solvers.
n_jobs : int, default: 1
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
Attributes
----------
coef_ : array, shape (n_classes, n_features)
Coefficient of the features in the decision function.
intercept_ : array, shape (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
n_iter_ : array, shape (n_classes,) or (1, )
Actual number of iterations for all classes. If binary or multinomial,
it returns only 1 element. For liblinear solver, only the maximum
number of iteration across all classes is given.
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
sklearn.svm.LinearSVC : learns SVM models using the same algorithm.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon,
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0, warm_start=False, n_jobs=1):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
self.warm_start = warm_start
self.n_jobs = n_jobs
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
.. versionadded:: 0.17
*sample_weight* support to LogisticRegression.
Returns
-------
self : object
Returns self.
"""
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
check_classification_targets(y)
self.classes_ = np.unique(y)
n_samples, n_features = X.shape
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if self.solver == 'liblinear':
self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state,
sample_weight=sample_weight)
self.n_iter_ = np.array([n_iter_])
return self
if self.solver == 'sag':
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
if self.warm_start:
warm_start_coef = getattr(self, 'coef_', None)
else:
warm_start_coef = None
if warm_start_coef is not None and self.fit_intercept:
warm_start_coef = np.append(warm_start_coef,
self.intercept_[:, np.newaxis],
axis=1)
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if self.multi_class == 'multinomial':
classes_ = [None]
warm_start_coef = [warm_start_coef]
if warm_start_coef is None:
warm_start_coef = [None] * n_classes
path_func = delayed(logistic_regression_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, pos_class=class_, Cs=[self.C],
fit_intercept=self.fit_intercept, tol=self.tol,
verbose=self.verbose, solver=self.solver, copy=False,
multi_class=self.multi_class, max_iter=self.max_iter,
class_weight=self.class_weight, check_input=False,
random_state=self.random_state, coef=warm_start_coef_,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
for (class_, warm_start_coef_) in zip(classes_, warm_start_coef))
fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]
if self.multi_class == 'multinomial':
self.coef_ = fold_coefs_[0][0]
else:
self.coef_ = np.asarray(fold_coefs_)
self.coef_ = self.coef_.reshape(n_classes, n_features +
int(self.fit_intercept))
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
For a multi_class problem, if multi_class is set to be "multinomial"
the softmax function is used to find the predicted probability of
each class.
Else use a one-vs-rest approach, i.e. calculate the probability
of each class assuming it to be positive using the logistic function,
and normalize these values across all the classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
if not hasattr(self, "coef_"):
raise NotFittedError("Call fit before prediction")
calculate_ovr = self.coef_.shape[0] == 1 or self.multi_class == "ovr"
if calculate_ovr:
return super(LogisticRegression, self)._predict_proba_lr(X)
else:
return softmax(self.decision_function(X), copy=False)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
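# Illustrative sketch (editorial addition, not part of the original module):
# basic use of LogisticRegression on synthetic binary data; the predicted
# probabilities sum to one across classes. The helper name below is
# hypothetical and never called here.
def _example_logistic_regression_usage():
    rng = np.random.RandomState(0)
    X = rng.randn(40, 4)
    y = np.hstack([np.zeros(20), np.ones(20)])
    clf = LogisticRegression(C=1.0, solver='lbfgs').fit(X, y)
    proba = clf.predict_proba(X)
    assert np.allclose(proba.sum(axis=1), 1.0)
    assert clf.coef_.shape == (1, 4) and clf.intercept_.shape == (1,)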
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin, _LearntSelectorMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
This class implements logistic regression using liblinear, newton-cg, sag
or lbfgs optimizers. The newton-cg, sag and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
For the grid of Cs values (that are set by default to be ten values in
a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
selected by the cross-validator StratifiedKFold, but it can be changed
using the cv parameter. In the case of newton-cg and lbfgs solvers,
we warm start along the path i.e guess the initial coefficients of the
present fit to be the coefficients got after convergence in the previous
fit, so it is supposed to be faster for high-dimensional dense data.
For a multiclass problem, the hyperparameters for each class are computed
using the best scores got by doing a one-vs-rest in parallel across all
folds and classes. Hence this is not the true multinomial loss.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : list of floats | int
Each of the values in Cs describes the inverse of regularization
strength. If Cs is an int, then a grid of Cs values is chosen
in a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
class_weight == 'balanced'
cv : integer or cross-validation generator
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.model_selection` module for the
list of possible cross-validation objects.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
scoring : callable
Scoring function to use as cross-validation criteria. For a list of
scoring functions that can be used, look at :mod:`sklearn.metrics`.
The default scoring option used is accuracy_score.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' is
faster for large ones.
- For multiclass problems, only 'newton-cg', 'sag' and 'lbfgs' handle
multinomial loss; 'liblinear' is limited to one-versus-rest
schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
- 'liblinear' might be slower in LogisticRegressionCV because it does
not handle warm-starting.
Note that 'sag' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float, optional
Tolerance for stopping criteria.
max_iter : int, optional
Maximum number of iterations of the optimization algorithm.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
verbose : int
For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any
positive number for verbosity.
refit : bool
If set to True, the scores are averaged across all folds, and the
coefs and the C that corresponds to the best score is taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'newton-cg',
'sag' and 'lbfgs' solver.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
intercept_scaling : float, default 1.
Useful only if solver is liblinear.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
`coef_` is readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
It is available only when the parameter fit_intercept is set to True
and is of shape (1,) when the problem is binary.
Cs_ : array
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
coefs_paths_ : array, shape ``(n_folds, len(Cs_), n_features)`` or \
``(n_folds, len(Cs_), n_features + 1)``
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, len(Cs_), n_features)`` or
``(n_folds, len(Cs_), n_features + 1)`` depending on whether the
intercept is fit or not.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
given is 'multinomial' then the same scores are repeated across
all classes, since this is the multinomial class.
Each dict value has shape (n_folds, len(Cs))
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
n_iter_ : array, shape (n_classes, n_folds, n_cs) or (1, n_folds, n_cs)
Actual number of iterations for all classes, folds and Cs.
In the binary or multinomial cases, the first dimension is equal to 1.
See also
--------
LogisticRegression
"""
def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=1, verbose=0,
refit=True, intercept_scaling=1., multi_class='ovr',
random_state=None):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
self : object
Returns self.
"""
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
if self.solver == 'sag':
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
check_classification_targets(y)
if y.ndim == 2 and y.shape[1] == 1:
warnings.warn(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning)
y = np.ravel(y)
check_consistent_length(X, y)
# init cross-validation generator
cv = check_cv(self.cv, y, classifier=True)
folds = list(cv.split(X, y))
self._enc = LabelEncoder()
self._enc.fit(y)
labels = self.classes_ = np.unique(y)
n_classes = len(labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % self.classes_[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
labels = labels[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
iter_labels = labels
if self.multi_class == 'multinomial':
iter_labels = [None]
if self.class_weight and not (isinstance(self.class_weight, dict) or
self.class_weight in
['balanced', 'auto']):
# 'auto' is deprecated and will be removed in 0.19
raise ValueError("class_weight provided should be a "
"dict or 'balanced'")
# compute the class weights for the entire dataset y
if self.class_weight in ("auto", "balanced"):
classes = np.unique(y)
class_weight = compute_class_weight(self.class_weight, classes, y)
class_weight = dict(zip(classes, class_weight))
else:
class_weight = self.class_weight
path_func = delayed(_log_reg_scoring_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=self.solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=class_weight, scoring=self.scoring,
multi_class=self.multi_class,
intercept_scaling=self.intercept_scaling,
random_state=self.random_state,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight
)
for label in iter_labels
for train, test in folds)
if self.multi_class == 'multinomial':
multi_coefs_paths, Cs, multi_scores, n_iter_ = zip(*fold_coefs_)
multi_coefs_paths = np.asarray(multi_coefs_paths)
multi_scores = np.asarray(multi_scores)
# This is just to maintain API similarity between the ovr and
# multinomial option.
# Coefs_paths is now n_folds X len(Cs) X n_classes X n_features
# we need it to be n_classes X len(Cs) X n_folds X n_features
# to be similar to "ovr".
coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)
# Multinomial has a true score across all labels. Hence the
# shape is n_folds X len(Cs). We need to repeat this score
# across all labels for API similarity.
scores = np.tile(multi_scores, (n_classes, 1, 1))
self.Cs_ = Cs[0]
self.n_iter_ = np.reshape(n_iter_, (1, len(folds),
len(self.Cs_)))
else:
coefs_paths, Cs, scores, n_iter_ = zip(*fold_coefs_)
self.Cs_ = Cs[0]
coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
len(self.Cs_), -1))
self.n_iter_ = np.reshape(n_iter_, (n_classes, len(folds),
len(self.Cs_)))
self.coefs_paths_ = dict(zip(labels, coefs_paths))
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(labels, scores))
self.C_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
# hack to iterate only once for multinomial case.
if self.multi_class == 'multinomial':
scores = multi_scores
coefs_paths = multi_coefs_paths
for index, label in enumerate(iter_labels):
if self.multi_class == 'ovr':
scores = self.scores_[label]
coefs_paths = self.coefs_paths_[label]
if self.refit:
best_index = scores.sum(axis=0).argmax()
C_ = self.Cs_[best_index]
self.C_.append(C_)
if self.multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, best_index, :, :],
axis=0)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
w, _, _ = logistic_regression_path(
X, y, pos_class=label, Cs=[C_], solver=self.solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
penalty=self.penalty, copy=False,
class_weight=class_weight,
multi_class=self.multi_class,
verbose=max(0, self.verbose - 1),
random_state=self.random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
w = w[0]
else:
# Take the best scores across every fold and the average of all
# coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
w = np.mean([coefs_paths[i][best_indices[i]]
for i in range(len(folds))], axis=0)
self.C_.append(np.mean(self.Cs_[best_indices]))
if self.multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
return self
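# Illustrative sketch (editorial addition, not part of the original module):
# cross-validated selection of C on synthetic binary data. The helper name
# below is hypothetical and never called here.
def _example_logistic_regression_cv_usage():
    rng = np.random.RandomState(0)
    X = rng.randn(60, 4)
    y = np.hstack([np.zeros(30), np.ones(30)])
    clf = LogisticRegressionCV(Cs=5, cv=3, solver='lbfgs').fit(X, y)
    assert clf.Cs_.shape == (5,)
    assert clf.C_.shape == (1,)   # a single best C for the binary problem
    assert clf.coef_.shape == (1, 4)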
|
mit
|
DailyActie/Surrogate-Model
|
01-codes/scikit-learn-master/sklearn/neural_network/tests/test_rbm.py
|
1
|
6273
|
import re
import sys
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
# you need that many iterations
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples([np.arange(1000) * 100])
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
|
mit
|
henryroe/PyNOMAD
|
nomad/nomad.py
|
1
|
39768
|
import os.path
import os
from pandas import DataFrame
from pandas.io.parsers import read_csv
import numpy as np
import datetime
import gzip
import sys
import pickle
import glob
base_dir = os.path.abspath(os.path.dirname(__file__))
about = {}
with open(os.path.join(base_dir, "__about__.py")) as f:
exec(f.read(), about)
__version__ = about["__version__"]
class Error(Exception):
pass
def _find_nomad_dir():
"""
Will look for a NOMAD catalog directory in the following order and use the first found:
as specified by environment variable: $NOMAD_DIR
~/nomad
~/nomad_gz
Note that this routine does no integrity checking beyond simply checking for the existence
of the directory.
"""
paths_to_search = [os.environ.get('NOMAD_DIR')]
paths_to_search.append('~/nomad/')
paths_to_search.append('~/nomad_gz/')
for cur_path in glob.glob('/Volumes/*/nomad'):
paths_to_search.append(cur_path)
for cur_path in glob.glob('/Volumes/*/nomad_gz'):
paths_to_search.append(cur_path)
for cur_path_to_test in paths_to_search:
if cur_path_to_test is not None:
if os.path.isdir(os.path.expanduser(cur_path_to_test)):
return os.path.expanduser(cur_path_to_test)
print "nomad.py: No nomad installation found. "
    print " User needs to specify the location with, e.g.:"
print " nomad.set_nomad_path('~/my_nomad_dir')"
return None
def set_nomad_path(nomad_path):
global _nomad_dir
_nomad_dir = nomad_path
_nomad_dir = _find_nomad_dir()
_nomad_record_length_bytes = 22 * 4
"""
The following listing of the number of stars in each USNO-B1.0 zone file was generated by running
the code below in a full install of the catalog. (Note: only the accelerator files are needed.)
This is used for decoding the integer entry in the NOMAD catalog into a USNO-B1.0 star number.
TODO: document the code used to generate _usnob1_stars_per_file
Here's a quick first version; it needs work:
def play_with_USNOB_numbers():
filelist = glob.glob('*/*.acc')
nstars_per_file = np.zeros([len(filelist)], dtype=np.int)
for i, curfilename in enumerate(filelist):
print curfilename
lastline = open(curfilename, 'r').readlines()[-1]
nstars_per_file[i] = int(lastline.split()[1]) + int(lastline.split()[2]) - 1
f = open('acc_summary.txt', 'w')
for i, curfilename in enumerate(filelist):
f.write(curfilename[5:9] + ' ' + str(nstars_per_file[i]) + '\n')
f.close()
"""
_usnob1_stars_per_file = np.array([322, 1169, 1823, 2612, 3371, 4139, 4808, 5765, 6444, 7322, 8016, 8909, 9670, 10314,
11263, 11835, 13190, 14973, 16231, 17700, 20669, 23184, 26816, 28304, 31435, 33428,
35381, 37796, 38808, 39156, 41500, 44794, 46042, 47331, 48725, 50009, 51695, 52946,
52240, 52085, 53698, 54894, 56096, 56971, 57452, 57496, 57018, 58063, 58169, 59731,
59652, 61062, 61576, 61485, 61466, 63310, 63306, 63681, 64476, 65877, 65452, 66213,
67216, 67051, 68046, 68058, 69627, 73052, 75798, 78582, 82872, 87571, 92682, 97756,
100864, 104973, 105860, 109878, 109098, 109109, 108937, 106702, 106182, 105003,
105254, 102218, 100767, 100516, 99471, 100766, 100316, 101832, 102376, 102868, 104234,
104787, 108139, 109205, 112527, 111954, 113577, 115244, 116881, 117955, 118217,
118434, 119929, 121465, 122813, 123836, 124975, 126550, 128144, 128939, 130428,
132562, 135897, 141742, 146595, 153319, 159736, 169726, 176177, 183895, 191318,
193234, 193022, 194765, 194256, 196578, 193996, 191457, 190619, 188074, 186312,
183860, 185818, 187596, 190087, 193349, 197948, 200828, 205082, 210720, 214383,
219350, 222625, 226665, 231444, 235156, 238118, 241692, 246924, 250497, 255584,
258416, 262831, 266058, 270435, 276428, 284383, 288846, 297821, 303760, 313491,
317863, 326925, 338311, 351065, 364218, 447046, 474491, 506817, 520876, 522432,
531027, 539203, 542403, 552881, 546832, 540962, 514613, 493017, 473456, 462544,
455625, 455781, 461743, 466257, 465629, 475433, 480679, 500968, 508664, 519432,
520393, 528083, 524984, 529948, 529990, 532562, 533738, 529047, 536051, 535179,
542508, 544411, 544365, 541708, 545235, 561027, 562366, 577766, 575458, 577985,
571268, 618881, 659641, 709672, 740766, 791860, 835359, 863263, 885092, 884682,
899939, 901092, 904390, 897394, 874051, 800560, 770452, 749253, 735291, 734112,
724409, 733649, 741701, 753091, 758987, 763966, 777820, 774044, 789055, 786689,
798779, 795057, 789391, 775359, 778750, 798657, 793448, 806121, 784114, 777804,
753972, 761240, 750261, 754235, 754080, 748366, 763460, 751827, 741396, 717554,
703738, 734661, 791084, 843736, 861463, 889380, 915776, 946470, 973619, 993332,
1015788, 1011352, 1011954, 997293, 951337, 896364, 864787, 857368, 838802, 847162,
831316, 831901, 820314, 832769, 853858, 851011, 870906, 865702, 893468, 882305,
880668, 842939, 837743, 832686, 828752, 821287, 801296, 812505, 811583, 824625,
818959, 831240, 825865, 811980, 794296, 780852, 783875, 774490, 788995, 786836,
797661, 863870, 966507, 1046615, 1072969, 1102005, 1130148, 1165979, 1163214, 1153892,
1129347, 1102423, 1101112, 1087203, 1046206, 974479, 922956, 898909, 871262, 858917,
847837, 853590, 853808, 853815, 846772, 841541, 834116, 829740, 834581, 830479,
846752, 846831, 856890, 853485, 853606, 863551, 856903, 862409, 856026, 859597,
861065, 861778, 860714, 873144, 867688, 859780, 860158, 862803, 866821, 856985,
865025, 890419, 971260, 1023891, 1058944, 1076095, 1110507, 1140340, 1145636, 1137893,
1115017, 1124505, 1115969, 1107457, 1074291, 1013663, 953747, 908479, 888285, 869566,
859697, 850990, 838367, 827445, 829539, 823663, 821849, 811016, 821243, 810051,
811644, 802577, 804441, 799059, 801840, 809837, 810168, 817468, 806812, 809270,
806444, 805345, 793847, 794984, 786950, 785519, 794386, 793537, 800047, 788800,
784113, 798537, 837428, 881388, 902530, 932286, 954198, 1003754, 1015007, 1018182,
1023316, 1017370, 998837, 979964, 957416, 913907, 890150, 863365, 831875, 811066,
792736, 795556, 790541, 799553, 798399, 796011, 807937, 801041, 804041, 796383,
814232, 816772, 823262, 812829, 808416, 812822, 812356, 818952, 809627, 816703,
801619, 797817, 798938, 784110, 777981, 765208, 762904, 752207, 758313, 756745,
760019, 781504, 827233, 881073, 908340, 928394, 947256, 977446, 980754, 981545,
996029, 990340, 987472, 975320, 955815, 925982, 890605, 853333, 824823, 817005,
796934, 802122, 800158, 798902, 813649, 813514, 822576, 808642, 823352, 822189,
831006, 818684, 821089, 828229, 828925, 851158, 841893, 852417, 833952, 843748,
832727, 828306, 829921, 814599, 820888, 810625, 821353, 805791, 818666, 815668,
817855, 836504, 866459, 923186, 971176, 1025919, 1050997, 1061632, 1076337, 1079128,
1084733, 1073519, 1063928, 1038123, 1012116, 987805, 945640, 909852, 878057, 865007,
839846, 833170, 830072, 820076, 835108, 838074, 862547, 851215, 865667, 850245,
847904, 848330, 848791, 865642, 866930, 886415, 876883, 896017, 886032, 902264,
900391, 898502, 906123, 898675, 911671, 901295, 907274, 912495, 936105, 941411,
955211, 983534, 1025590, 1114628, 1172038, 1215131, 1244975, 1253647, 1263933,
1250837, 1261017, 1236270, 1235517, 1167324, 1142490, 1095666, 1040985, 981374,
942190, 930801, 906678, 912256, 918172, 919109, 923125, 913147, 931106, 931321,
947449, 933094, 950038, 953099, 962441, 961168, 960823, 973313, 963788, 979653,
969694, 980824, 969303, 970319, 967812, 964190, 970973, 955431, 965934, 952192,
955970, 948235, 952093, 968590, 994247, 1062480, 1128788, 1171130, 1189988, 1208295,
1209280, 1207908, 1197858, 1192690, 1192276, 1182737, 1173208, 1156118, 1110535,
1064735, 1027051, 1015429, 989776, 998816, 984063, 964857, 944631, 933943, 938201,
927898, 928696, 919642, 920576, 915230, 912776, 914202, 901268, 919043, 901830,
900752, 885705, 888764, 879451, 872961, 880140, 869512, 896357, 881205, 905829,
901079, 920986, 924005, 931153, 954238, 971425, 1025145, 1049405, 1072551, 1070955,
1082510, 1090958, 1077112, 1091427, 1085273, 1104951, 1094398, 1079142, 1048757,
1013855, 972329, 946217, 931976, 897879, 902637, 894856, 907610, 923233, 914852,
915237, 899664, 900111, 872653, 880313, 873300, 879019, 869390, 849960, 852684,
841266, 844413, 836404, 848976, 834699, 838707, 845364, 842967, 864622, 852018,
862912, 848788, 861695, 859158, 869598, 907354, 956874, 1032335, 1095744, 1165693,
1192294, 1213210, 1191558, 1186715, 1182222, 1175192, 1180628, 1167702, 1131600,
1076789, 1023082, 961229, 913796, 879026, 854001, 842509, 843926, 843978, 840987,
842199, 856655, 861666, 874152, 866637, 860745, 843822, 833936, 833694, 831613,
832108, 813658, 800480, 789885, 777600, 780385, 778480, 787198, 775632, 774415,
748643, 743469, 733440, 732730, 729742, 718681, 740278, 763054, 797479, 839794,
854173, 872460, 872603, 877553, 862310, 873977, 866733, 873257, 879989, 852710,
825179, 786206, 761117, 723453, 691978, 673300, 668379, 674323, 663918, 671450,
665742, 670897, 670786, 672742, 671784, 670530, 670698, 663672, 672810, 664340,
675951, 682507, 699103, 697539, 692890, 695494, 674927, 682403, 676852, 690950,
700196, 704829, 709933, 703007, 698655, 694245, 723108, 740001, 766278, 790879,
804176, 826479, 821534, 832035, 819058, 833255, 834309, 843351, 855745, 834477,
815814, 783309, 755027, 712579, 687390, 673552, 666754, 676873, 664021, 674878,
657130, 667065, 656510, 663071, 656034, 651656, 652763, 639297, 648935, 630923,
639273, 615646, 616161, 613875, 606619, 603874, 597252, 598004, 589597, 592912,
581004, 594559, 593476, 605153, 597423, 589421, 604840, 631893, 664305, 707212,
747060, 792071, 820250, 815844, 807824, 822160, 849806, 876601, 888123, 852386,
799267, 763133, 718566, 678505, 633194, 621620, 623982, 627788, 625720, 632535,
635559, 643170, 640126, 629342, 630428, 631616, 632445, 630816, 634848, 627222,
618275, 611568, 603359, 596777, 586464, 582391, 577854, 576103, 569710, 569350,
571789, 571260, 574800, 576414, 579059, 577430, 593965, 619864, 645204, 669052,
702009, 742807, 764986, 780018, 781123, 776539, 783938, 788930, 807308, 790220,
765563, 741838, 717930, 695730, 664275, 643882, 642098, 642365, 629820, 624176,
607997, 603489, 594044, 588724, 592261, 596149, 593362, 594039, 598475, 595575,
601206, 600351, 599035, 596706, 592342, 591874, 591837, 598711, 595284, 597248,
596380, 601362, 603373, 603747, 606689, 610520, 619766, 630999, 651163, 667385,
698108, 719813, 741611, 749112, 758757, 755403, 758786, 767261, 768735, 757872,
742972, 743307, 731274, 704412, 667731, 640137, 635453, 626702, 624245, 616278,
623782, 619458, 628116, 622453, 628185, 635745, 638405, 639270, 632835, 630937,
618282, 631044, 628834, 636031, 635388, 642654, 639339, 637234, 638553, 638800,
647245, 644644, 648647, 650628, 662107, 665017, 673958, 695559, 704024, 728346,
753921, 772425, 780281, 776160, 775949, 761855, 773536, 783706, 791463, 775111,
747451, 730428, 705635, 671844, 641180, 633579, 632745, 632557, 629828, 632395,
645726, 659216, 670185, 668101, 664902, 645316, 648679, 652791, 643182, 641084,
636908, 649242, 659335, 659641, 654499, 658395, 667528, 671868, 657477, 646906,
629719, 627748, 630478, 632224, 628109, 616424, 620070, 646785, 659809, 671220,
698920, 717289, 725426, 726634, 726764, 725043, 741966, 754867, 761604, 748728,
725916, 718643, 706393, 670605, 635108, 621064, 622262, 622388, 619354, 612384,
611398, 604836, 607535, 598642, 600029, 602414, 608602, 611197, 605759, 604278,
602127, 613763, 620022, 625641, 617278, 607739, 604246, 603566, 608764, 601454,
602523, 603622, 607285, 607095, 602270, 607675, 616170, 633463, 628318, 628051,
643327, 664600, 673305, 674060, 663089, 665555, 665586, 688754, 694943, 688934,
671743, 660616, 633827, 599672, 577467, 567021, 572920, 570797, 571898, 565752,
563759, 571296, 566548, 571037, 575457, 580872, 579815, 587218, 587837, 584257,
581551, 577260, 580059, 580159, 576568, 565827, 563079, 559791, 558592, 567125,
577348, 593586, 596419, 604727, 598518, 604098, 607430, 619733, 624515, 643479,
667625, 690647, 713805, 712252, 730527, 724866, 749270, 767527, 767875, 742339,
721014, 708088, 687187, 655547, 620770, 607893, 605617, 609174, 605391, 604347,
601973, 598739, 601223, 591592, 594341, 594975, 601704, 604162, 607234, 610673,
607087, 607124, 609425, 610198, 610285, 609620, 609060, 602002, 601336, 595735,
597116, 588190, 588199, 595117, 597379, 595403, 602631, 616215, 625329, 633866,
658347, 691286, 713340, 714644, 714635, 704288, 714249, 721372, 729730, 709481,
688510, 684853, 670205, 644315, 616706, 596111, 587141, 585490, 565188, 551870,
546328, 540621, 541346, 534166, 534550, 534626, 535416, 530613, 532251, 530168,
526970, 530051, 529156, 535877, 537258, 534850, 533806, 539451, 548648, 546408,
551355, 548508, 553019, 555004, 553330, 553480, 558540, 574683, 579628, 594387,
614615, 643171, 657319, 655080, 650883, 646620, 647011, 651907, 659411, 650505,
641111, 634694, 619528, 583934, 547719, 529163, 531819, 531812, 535515, 532914,
527093, 529761, 531871, 538065, 532539, 539309, 545362, 543276, 531019, 515888,
510226, 506344, 507127, 508090, 509898, 511681, 510472, 510010, 510473, 505811,
501066, 503202, 502406, 504765, 505126, 508238, 517499, 533539, 543109, 555173,
572238, 590839, 599050, 608497, 608025, 611051, 616761, 631594, 639659, 626523,
616043, 610075, 589992, 548617, 515310, 504203, 506188, 505448, 504102, 508348,
513592, 531112, 533835, 535224, 527241, 523074, 521076, 523680, 526732, 526107,
529441, 526697, 533742, 534503, 540544, 542393, 543958, 545404, 532496, 527881,
513914, 518901, 519567, 524723, 523325, 520438, 530161, 539585, 550303, 560301,
587550, 610333, 623556, 624288, 621272, 618064, 622105, 640220, 638409, 628512,
616676, 607554, 591709, 562659, 525581, 512612, 510778, 511069, 509121, 512681,
513979, 515597, 511884, 511478, 514213, 514903, 513072, 510277, 513277, 510760,
516690, 525518, 531812, 530483, 521095, 512181, 513305, 514874, 516989, 512115,
508831, 509245, 510147, 520588, 513707, 516155, 531626, 542884, 543776, 539162,
549710, 566509, 580444, 579941, 577312, 577250, 587003, 595975, 597743, 591007,
580555, 580288, 559924, 524787, 487371, 475833, 475612, 476569, 477348, 475570,
473628, 477141, 477030, 481182, 478507, 479390, 477442, 474191, 473339, 473009,
473713, 472977, 470844, 472214, 472523, 472061, 472030, 471410, 471507, 472858,
476143, 486121, 489225, 486468, 482263, 478699, 487815, 497133, 509344, 512434,
522717, 544826, 562905, 562495, 553240, 555856, 564198, 564917, 556492, 541208,
525671, 514605, 495378, 461561, 424153, 402854, 403134, 402111, 404147, 400673,
401065, 403070, 396698, 398623, 396657, 397181, 394883, 395207, 394946, 392446,
389395, 387282, 390170, 388304, 389743, 386621, 382795, 381773, 383691, 382188,
376917, 379686, 378402, 379012, 373939, 376086, 380985, 388606, 390211, 393621,
414970, 443761, 463310, 465440, 452816, 454306, 461810, 467816, 470141, 459365,
451148, 442374, 434480, 402889, 364779, 353583, 358213, 355153, 341431, 334162,
328670, 328709, 324541, 321846, 317608, 315979, 312642, 309174, 307054, 300600,
299452, 295918, 295151, 289072, 282992, 279918, 275416, 272764, 268366, 268855,
264701, 261930, 260938, 260418, 257476, 256779, 265033, 271019, 276184, 276235,
286734, 300901, 314773, 309571, 309265, 310233, 317127, 326175, 319614, 318596,
316229, 311031, 302046, 273612, 248909, 236143, 233707, 230021, 223559, 222571,
221911, 224098, 221258, 221052, 221941, 223110, 222369, 220551, 213810, 207055,
184366, 182059, 182656, 181529, 180631, 179082, 179881, 177987, 176422, 177562,
175653, 173335, 173246, 171247, 171860, 172172, 174144, 177698, 178925, 180430,
183724, 188353, 190485, 186900, 185941, 187083, 187591, 188037, 185679, 182466,
173851, 165604, 159467, 150686, 142028, 137444, 138053, 139047, 138806, 141959,
139537, 141096, 140410, 140371, 137283, 134779, 132918, 130988, 129999, 126713,
126221, 122385, 123613, 122767, 123305, 123290, 121944, 131538, 126693, 123228,
119917, 118425, 116271, 114670, 117688, 117271, 120443, 120351, 124310, 128089,
134592, 138347, 142115, 140779, 140815, 144505, 146078, 147347, 145520, 137282,
129534, 122080, 115007, 106154, 97886, 94421, 94549, 94918, 92353, 93494, 92458,
91658, 90482, 90480, 90752, 91398, 90260, 91073, 90618, 89949, 84698, 84904, 84485,
83301, 79901, 79647, 77832, 76557, 74923, 75808, 75384, 73672, 74206, 72904, 71879,
70993, 72901, 71275, 71980, 71121, 71191, 72573, 72081, 74285, 72105, 70771, 70913,
70382, 67968, 62205, 58451, 56489, 54524, 49800, 46462, 44237, 44029, 44049, 43651,
45007, 44672, 45482, 47038, 47356, 45593, 46442, 46318, 45103, 45698, 43867, 45419,
45368, 46018, 44501, 44212, 44648, 43434, 43482, 43496, 43368, 42490, 41392, 41773,
42190, 41488, 39075, 38786, 39213, 39191, 37233, 34853, 33064, 30154, 28448, 27289,
25810, 23157, 22341, 21056, 18164, 15700, 14128, 13210, 11470, 9955, 9308, 8862, 8477,
8220, 8115, 7398, 6974, 6501, 5930, 5527, 4303, 2971, 2024, 1172, 322])
_usnob1_stars_per_file_cumsum = _usnob1_stars_per_file.cumsum()
"""
Needed to determine maximum absolute RA and DEC proper motions contained in the catalog in order to then be able
to calculate the maximum over-search distance needed for any given field (as a function of epoch).
Run as a one-time piece of code:
import nomad
max_RA_pm = 0
max_DEC_pm = 0
for curfilenum in range(1800):
print curfilenum, max_RA_pm, max_DEC_pm
df = nomad._convert_raw_byte_data_to_dataframe(open(nomad._nomad_dir + '/' +
('%04i' % curfilenum)[0:3] + '/m' +
('%04i' % curfilenum) + '.cat', 'rb').read(-1))
max_RA_pm = max(max_RA_pm, max(abs(df['proper motion of RA*COS(dec) in integer 0.0001 arcsec/year'])))
max_DEC_pm = max(max_DEC_pm, max(abs(df['proper motion of SPD in integer 0.0001 arcsec/year'])))
print "_max_pm_RAcosDEC_arcsecPerYear = " + str(max_RA_pm * 0.0001)
print "_max_pm_DEC_arcsecPerYear = " + str(max_DEC_pm * 0.0001)
"""
_max_pm_RAcosDEC_arcsecPerYear = 9.894
_max_pm_DEC_arcsecPerYear = 10.3269
def _determine_usnob1_id_from_usnob1_integer(usnob1_integer):
if usnob1_integer == 0:
return ''
else:
file_number = (_usnob1_stars_per_file_cumsum > usnob1_integer).argmax()
star_number = usnob1_integer - _usnob1_stars_per_file_cumsum[file_number - 1]
return ('%04i' % file_number) + '-' + ('%07i' % star_number)
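# For example, with the per-zone star counts above an integer of 500 decodes to
# '0001-0000178': the cumulative count of zone 0000 is 322, so the star falls
# in zone 0001 with running number 500 - 322 = 178. An integer of 0 is returned
# as an empty string (no USNO-B1.0 counterpart).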
def _datetime_to_decimal_year(epoch):
"""
Input - datetime.datetime or datetime.date or integer year or float year
Output - decimal year
"""
if isinstance(epoch, datetime.datetime):
return 2000.0 + (epoch - datetime.datetime(2000, 1, 1, 0, 0)).total_seconds() / (365.25 * 24. * 3600.)
elif isinstance(epoch, datetime.date):
return 2000.0 + (datetime.datetime(epoch.year, epoch.month, epoch.day, 0, 0) -
datetime.datetime(2000, 1, 1, 0, 0)).total_seconds() / (365.25 * 24. * 3600.)
else:
try:
return float(epoch)
except ValueError:
raise Error('Unable to convert input epoch to floating point.')
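# For example, _datetime_to_decimal_year(datetime.datetime(2000, 1, 1)) and
# _datetime_to_decimal_year(2000) both return 2000.0; datetime inputs are
# converted using a 365.25-day year measured from 2000-01-01 00:00.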
def _read_accelerator_file(file_number):
nomad_filenum_str = '%04i' % file_number
if os.path.isfile(os.path.join(_nomad_dir, nomad_filenum_str[0:3], 'm' + nomad_filenum_str + '.acc')):
df = read_csv(open(os.path.join(_nomad_dir, nomad_filenum_str[0:3], 'm' + nomad_filenum_str + '.acc'), 'r'),
sep='\s+', header=None, names=['ra_band', 'start_record', 'num_records'],
index_col='ra_band')
elif os.path.isfile(os.path.join(_nomad_dir, nomad_filenum_str[0:3], 'm' + nomad_filenum_str + '.acc.gz')):
df = read_csv(gzip.open(os.path.join(_nomad_dir, nomad_filenum_str[0:3], 'm' + nomad_filenum_str + '.acc.gz'), 'r'),
sep='\s+', header=None, names=['ra_band', 'start_record', 'num_records'],
index_col='ra_band')
else:
raise Error("Could not find accelerator file number " + str(file_number))
return df
def _determine_record_numbers_to_retrieve(min_ra_swatch, max_ra_swatch, dec_filenum):
acc = _read_accelerator_file(dec_filenum)
if min_ra_swatch <= max_ra_swatch:
return [(acc.ix[min_ra_swatch]['start_record'],
acc.ix[max_ra_swatch]['start_record'] + acc.ix[max_ra_swatch]['num_records'] - 1)]
else:
return [(1, acc.ix[max_ra_swatch]['start_record'] + acc.ix[max_ra_swatch]['num_records'] - 1),
(acc.ix[min_ra_swatch]['start_record'],
acc.ix[23.75]['start_record'] + acc.ix[23.75]['num_records'] - 1)]
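# For example, for an RA range that wraps through 0h (min_ra_swatch = 23.5,
# max_ra_swatch = 0.25) the function above returns two record ranges: one from
# record 1 through the end of the 0.25h band, and one from the start of the
# 23.5h band through the end of the last (23.75h) band.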
def _convert_raw_byte_data_to_dataframe(raw_byte_data, nomad_ids=None):
"""
Input is a byte string of one or more raw NOMAD catalog entries.
if available, nomad_star_ids will be used to set the index of the returned DataFrame.
(otherwise index is just integers)
Output is a pandas.DataFrame
"""
# Each record contains 22 integers (4 byte). The schema is:
schema = """ ( 1) RA at 2000.0 in integer 0.001 arcsec
( 2) SPD at 2000.0 in integer 0.001 arcsec
( 3) std. dev. of RA*COS(dec) in integer 0.001 arcsec at central epoch
( 4) std. dev. of SPD in integer 0.001 arcsec at central epoch
( 5) proper motion of RA*COS(dec) in integer 0.0001 arcsec/year
( 6) proper motion of SPD in integer 0.0001 arcsec/year
( 7) std. dev. of (5) in integer 0.0001 arcsec/year
( 8) std. dev. of (6) in integer 0.0001 arcsec/year
( 9) central epoch of RA in integer 0.001 year
(10) central epoch of SPD in integer 0.001 year
(11) B magnitude in integer 0.001 mag
(12) V magnitude in integer 0.001 mag
(13) R magnitude in integer 0.001 mag
(14) J magnitude in integer 0.001 mag
(15) H magnitude in integer 0.001 mag
(16) K magnitude in integer 0.001 mag
(17) USNO-B1.0 ID integer
(18) 2MASS ID integer
(19) YB6 ID integer
(20) UCAC-2 ID integer
(21) Tycho2 ID integer
(22) flags integer"""
dtype = np.dtype([(a[a.find(')') + 1:].strip(), '<i4') for a in schema.splitlines()])
df = DataFrame(np.fromstring(raw_byte_data, dtype=dtype))
if nomad_ids is not None:
if type(nomad_ids) == str:
df.index = [nomad_ids]
else:
df.index = nomad_ids
df['RAJ2000_epoch2000'] = df['RA at 2000.0 in integer 0.001 arcsec'] * 0.001 / 3600.
df['DEJ2000_epoch2000'] = -90. + df['SPD at 2000.0 in integer 0.001 arcsec'] * 0.001 / 3600.
df['Bmag'] = df['B magnitude in integer 0.001 mag'] * 0.001
df['Vmag'] = df['V magnitude in integer 0.001 mag'] * 0.001
df['Rmag'] = df['R magnitude in integer 0.001 mag'] * 0.001
df['Jmag'] = df['J magnitude in integer 0.001 mag'] * 0.001
df['Hmag'] = df['H magnitude in integer 0.001 mag'] * 0.001
df['Kmag'] = df['K magnitude in integer 0.001 mag'] * 0.001
for cur_band in ['Bmag', 'Vmag', 'Rmag', 'Jmag', 'Hmag', 'Kmag']:
df.loc[df[cur_band] == 30.0, cur_band] = np.NaN
df['USNO-B1.0'] = [_determine_usnob1_id_from_usnob1_integer(a) for a in df['USNO-B1.0 ID integer']]
# df['2MASS'] = df['2MASS ID integer'] # TODO: someday add conversion to 2MASS ID
# df['YB6'] = df['YB6 ID integer'] # TODO: someday add conversion to YB6 ID
# df['UCAC-2'] = df['UCAC-2 ID integer'] # TODO: someday add conversion to UCAC-2 ID
# df['flags'] = df['flags integer'] # TODO: decode flags integer into something useful
columns_to_drop = list(dtype.names)
# We want to save some of the other original columns, though they will be deleted later after they
# have been used in _apply_proper_motion
for column_name in ['std. dev. of RA*COS(dec) in integer 0.001 arcsec at central epoch',
'std. dev. of SPD in integer 0.001 arcsec at central epoch',
'proper motion of RA*COS(dec) in integer 0.0001 arcsec/year',
'proper motion of SPD in integer 0.0001 arcsec/year',
'std. dev. of (5) in integer 0.0001 arcsec/year',
'std. dev. of (6) in integer 0.0001 arcsec/year',
'central epoch of RA in integer 0.001 year',
'central epoch of SPD in integer 0.001 year']:
columns_to_drop.pop(columns_to_drop.index(column_name))
return df.drop(columns_to_drop, axis=1)
def _apply_proper_motion(df, epoch=2000.0):
"""
Apply proper motion for input epoch.
Also calculate coordinate uncertainty at the epoch.
Drop the original columns.
Note: We encourage the user to NOT later re-calculate proper motions for a different epoch. The better
way is to reload the search for the new epoch. The risk is the small edge case of a star that at epoch 1 is
not in the requested field but is in the field at epoch 2.
"""
epoch = _datetime_to_decimal_year(epoch)
years_since_2000 = epoch - 2000.0
cosDec = np.cos(np.radians(df['DEJ2000_epoch2000']))
pm_RA = df['proper motion of RA*COS(dec) in integer 0.0001 arcsec/year'] * (0.0001 / 3600.) / cosDec
df['RAJ2000'] = (df['RAJ2000_epoch2000'] + pm_RA * years_since_2000) % 360.0
pm_DEC = df['proper motion of SPD in integer 0.0001 arcsec/year'] * (0.0001 / 3600.)
df['DEJ2000'] = df['DEJ2000_epoch2000'] + pm_DEC * years_since_2000
df['epoch'] = epoch
years_since_central_epoch_RA = epoch - df['central epoch of RA in integer 0.001 year'] * 0.001
base_err_RA = df['std. dev. of RA*COS(dec) in integer 0.001 arcsec at central epoch'] * (0.001 / 3600.) / cosDec
pm_err_RA = (years_since_central_epoch_RA *
df['std. dev. of (5) in integer 0.0001 arcsec/year'] * (0.0001 / 3600.) / cosDec)
df['errRAJ2000'] = np.sqrt((base_err_RA) ** 2 + (pm_err_RA) ** 2)
years_since_central_epoch_DEC = epoch - df['central epoch of SPD in integer 0.001 year'] * 0.001
base_err_DEC = df['std. dev. of SPD in integer 0.001 arcsec at central epoch'] * (0.001 / 3600.)
pm_err_DEC = (years_since_central_epoch_DEC *
df['std. dev. of (6) in integer 0.0001 arcsec/year'] * (0.0001 / 3600.))
df['errDEJ2000'] = np.sqrt((base_err_DEC) ** 2 + (pm_err_DEC) ** 2)
df['proper motion of RA*COS(dec) arcsec/year'] = df['proper motion of RA*COS(dec) in integer 0.0001 arcsec/year'] * 0.0001
df['proper motion of Dec in arcsec/year'] = df['proper motion of SPD in integer 0.0001 arcsec/year'] * 0.0001
columns_to_drop = ['RAJ2000_epoch2000', 'DEJ2000_epoch2000',
'std. dev. of RA*COS(dec) in integer 0.001 arcsec at central epoch',
'std. dev. of SPD in integer 0.001 arcsec at central epoch',
'proper motion of RA*COS(dec) in integer 0.0001 arcsec/year',
'proper motion of SPD in integer 0.0001 arcsec/year',
'std. dev. of (5) in integer 0.0001 arcsec/year',
'std. dev. of (6) in integer 0.0001 arcsec/year',
'central epoch of RA in integer 0.001 year',
'central epoch of SPD in integer 0.001 year']
return df.drop(columns_to_drop, axis=1)
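# Worked example of the proper-motion arithmetic above (numbers are
# illustrative, not taken from the catalog): a star at Dec = +60 deg with a
# proper motion of 0.1 arcsec/yr in RA*cos(Dec) moves 0.1 / cos(60 deg) = 0.2
# arcsec/yr in RA, i.e. 0.2 / 3600 deg/yr, so for epoch 2010.0 its RAJ2000
# value is shifted by about 0.00056 deg relative to epoch 2000.0; DEJ2000 is
# shifted by the Dec proper motion times the elapsed years directly.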
def fetch_star_by_nomad_id(nomad_ids, epoch=None):
"""
nomad_ids - can be either a single NOMAD identifier, e.g.:
'0999-0192017'
or an iterable object (e.g. list, etc) of NOMAD identifiers, e.g.:
['0999-0192017', '0999-0192019']
"""
if type(nomad_ids) == str:
nomad_ids = [nomad_ids]
try:
file_numbers = np.array([np.int(a.split('-')[0]) for a in nomad_ids], dtype=np.int)
star_numbers = np.array([np.int(a.split('-')[1]) for a in nomad_ids], dtype=np.int)
except:
raise Error('Unable to parse input into NOMAD file numbers and star numbers')
nstars = len(file_numbers)
raw_byte_data = [None] * nstars
for i in range(nstars):
if raw_byte_data[i] is None:
nomad_filenum_str = '%04i' % file_numbers[i]
if os.path.isfile(os.path.join(_nomad_dir, nomad_filenum_str[0:3], 'm' + nomad_filenum_str + '.cat')):
f = open(os.path.join(_nomad_dir, nomad_filenum_str[0:3], 'm' + nomad_filenum_str + '.cat'), 'rb')
elif os.path.isfile(os.path.join(_nomad_dir, nomad_filenum_str[0:3], 'm' + nomad_filenum_str + '.cat.gz')):
f = gzip.open(os.path.join(_nomad_dir, nomad_filenum_str[0:3], 'm' + nomad_filenum_str + '.cat.gz'), 'rb')
else:
raise Error("Could not find nomad file number " + nomad_filenum_str)
for j in np.where(file_numbers == file_numbers[i])[0]:
f.seek(_nomad_record_length_bytes * (star_numbers[j] - 1))
raw_byte_data[j] = f.read(_nomad_record_length_bytes)
f.close()
df = _convert_raw_byte_data_to_dataframe(''.join(raw_byte_data), nomad_ids=nomad_ids)
if epoch is None:
returned_star = _apply_proper_motion(df, epoch=2000.0)
else:
returned_star = _apply_proper_motion(df, epoch=epoch)
return returned_star
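# Example usage (illustrative; assumes a local NOMAD installation that
# _find_nomad_dir or set_nomad_path has made available):
#     star = fetch_star_by_nomad_id('0999-0192017')
#     stars = fetch_star_by_nomad_id(['0999-0192017', '0999-0192019'],
#                                    epoch=2014.5)
# Both calls return a pandas DataFrame indexed by the NOMAD identifiers.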
def fetch_nomad_box(ra_range, dec_range, epoch=2000.0):
"""
ra_range - [>=low, <high] RA in degrees
can wrap around 360, e.g. [359.5, 0.5]
dec_range - [>=low, <high] DEC in degrees
order of dec_range is irrelevant as search area is >=min(dec_range) to <max(dec_range)
epoch - default of 2000.0, can also be a datetime.date or datetime.datetime
Note that search is over a larger box than requested, then proper motions are applied, then
requested ra_range/dec_range limits are applied. Size of over-search is dictated by
the largest proper motion in the NOMAD catalog.
"""
# DEC swatches are 0.1 degree
# stars within a DEC swatch are >=minDec and <(minDec + 0.1deg)
    # RA swatches (within accelerator files) are 0.25 hours of RA
    # stars within an RA swatch are >=minRA and <(minRA + 0.25h)
epoch = _datetime_to_decimal_year(epoch)
years_since_2000 = epoch - 2000.0
dec_oversearch = np.abs((years_since_2000 * _max_pm_DEC_arcsecPerYear) / 3600.)
min_dec = max(-90.0, min(dec_range) - dec_oversearch)
max_dec = min(90.0, max(dec_range) + dec_oversearch)
min_dec_filenum = int((min_dec + 90) * 10.)
max_dec_filenum = min(1799, int((max_dec + 90) * 10.))
# TODO: fix that the next two lines break when max_dec == 90 or min_dec == -90 because of divide-by-zero
min_cosDec = min(np.cos(np.radians(min_dec)), np.cos(np.radians(max_dec)))
ra_oversearch = np.abs((years_since_2000 * _max_pm_RAcosDEC_arcsecPerYear) / (3600. * min_cosDec))
min_ra = ((ra_range[0] - ra_oversearch) % 360.) / 15.
max_ra = ((ra_range[1] + ra_oversearch) % 360.) / 15.
min_ra_swatch = int(min_ra / 0.25) * 0.25
max_ra_swatch = int(max_ra / 0.25) * 0.25
raw_byte_data = ''
nomad_ids = []
for cur_dec_filenum in np.arange(min_dec_filenum, max_dec_filenum + 1):
records_to_retrieve = _determine_record_numbers_to_retrieve(min_ra_swatch, max_ra_swatch, cur_dec_filenum)
nomad_filenum_str = '%04i' % cur_dec_filenum
if os.path.isfile(os.path.join(_nomad_dir, nomad_filenum_str[0:3], 'm' + nomad_filenum_str + '.cat')):
f = open(os.path.join(_nomad_dir, nomad_filenum_str[0:3], 'm' + nomad_filenum_str + '.cat'), 'rb')
elif os.path.isfile(os.path.join(_nomad_dir, nomad_filenum_str[0:3], 'm' + nomad_filenum_str + '.cat.gz')):
f = gzip.open(os.path.join(_nomad_dir, nomad_filenum_str[0:3], 'm' + nomad_filenum_str + '.cat.gz'), 'rb')
else:
raise Error("Could not find nomad file number " + nomad_filenum_str)
for cur_rec in records_to_retrieve:
f.seek((cur_rec[0] - 1) * _nomad_record_length_bytes)
raw_byte_data += f.read((cur_rec[1] - cur_rec[0] + 1) * _nomad_record_length_bytes)
nomad_ids.extend([nomad_filenum_str + '-' + ('%07i' % a) for a in range(cur_rec[0], cur_rec[1] + 1)])
stars = _apply_proper_motion(_convert_raw_byte_data_to_dataframe(raw_byte_data, nomad_ids=nomad_ids), epoch=epoch)
stars = stars[(stars['DEJ2000'] >= min(dec_range)) & (stars['DEJ2000'] < max(dec_range))]
if min_ra <= max_ra:
stars = stars[(stars['RAJ2000'] >= ra_range[0]) & (stars['RAJ2000'] < ra_range[1])]
else:
stars = stars[(stars['RAJ2000'] < ra_range[1]) | (stars['RAJ2000'] >= ra_range[0])]
return stars
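# Example usage (illustrative; requires a local NOMAD installation):
#     stars = fetch_nomad_box([149.9, 150.1], [2.1, 2.3], epoch=2015.0)
#     bright = stars[stars['Vmag'] < 15.0]
# The returned DataFrame has one row per star, with RAJ2000/DEJ2000 propagated
# to the requested epoch and the B/V/R/J/H/K magnitude columns.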
if __name__ == '__main__':
if len(sys.argv) == 1:
pass
else:
if sys.argv[1] == 'help':
pass # TODO: write instructions on options
elif sys.argv[1] == 'radec_range':
try:
ra_range = [float(sys.argv[2]), float(sys.argv[3])]
dec_range = [float(sys.argv[4]), float(sys.argv[5])]
epoch = float(sys.argv[6])
except:
raise Error("Expected 5 numbers after radec_range: \n\t" +
"RA_low_deg RA_high_deg DEC_low_deg DEC_high_deg Epoch")
stars = fetch_nomad_box(ra_range, dec_range, epoch)
if 'pickle' in sys.argv:
pickle.dump(stars, sys.stdout)
else:
sys.stdout.write(stars.to_string() + '\n')
else:
raise Error("unrecognized input on command line: \n\t" + ' '.join(sys.argv))
|
mit
|
aclifton/cpeg853-gem5
|
util/dram_lat_mem_rd_plot.py
|
13
|
5155
|
#!/usr/bin/env python
# Copyright (c) 2015 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Hansson
try:
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
except ImportError:
print "Failed to import matplotlib and numpy"
exit(-1)
import sys
import re
# This script post-processes and plots the output from running
# configs/dram/lat_mem_rd.py; it parses the simout and stats.txt files to
# extract the relevant data points.
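# Example invocation (illustrative; the argument is the gem5 output directory,
# e.g. m5out, containing the simout and stats.txt files from such a run):
#     ./dram_lat_mem_rd_plot.py m5out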
def main():
if len(sys.argv) != 2:
print "Usage: ", sys.argv[0], "<simout directory>"
exit(-1)
try:
stats = open(sys.argv[1] + '/stats.txt', 'r')
except IOError:
print "Failed to open ", sys.argv[1] + '/stats.txt', " for reading"
exit(-1)
try:
simout = open(sys.argv[1] + '/simout', 'r')
except IOError:
print "Failed to open ", sys.argv[1] + '/simout', " for reading"
exit(-1)
# Get the address ranges
got_ranges = False
ranges = []
iterations = 1
for line in simout:
if got_ranges:
ranges.append(int(line) / 1024)
match = re.match("lat_mem_rd with (\d+) iterations, ranges:.*", line)
if match:
got_ranges = True
iterations = int(match.groups(0)[0])
simout.close()
if not got_ranges:
print "Failed to get address ranges, ensure simout is up-to-date"
exit(-1)
# Now parse the stats
raw_rd_lat = []
for line in stats:
match = re.match(".*readLatencyHist::mean\s+(.+)\s+#.*", line)
if match:
raw_rd_lat.append(float(match.groups(0)[0]) / 1000)
stats.close()
# The stats also contain the warming, so filter the latency stats
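    # e.g. with iterations == 2, every third readLatencyHist value
    # (indices 0, 3, 6, ...) is the warming pass and is dropped below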
i = 0
filtered_rd_lat = []
for l in raw_rd_lat:
if i % (iterations + 1) == 0:
pass
else:
filtered_rd_lat.append(l)
i = i + 1
# Next we need to take care of the iterations
rd_lat = []
for i in range(iterations):
rd_lat.append(filtered_rd_lat[i::iterations])
final_rd_lat = map(lambda p: min(p), zip(*rd_lat))
# Sanity check
if not (len(ranges) == len(final_rd_lat)):
print "Address ranges (%d) and read latency (%d) do not match" % \
(len(ranges), len(final_rd_lat))
exit(-1)
for (r, l) in zip(ranges, final_rd_lat):
print r, round(l, 2)
# lazy version to check if an integer is a power of two
def is_pow2(num):
return num != 0 and ((num & (num - 1)) == 0)
plt.semilogx(ranges, final_rd_lat)
# create human readable labels
xticks_locations = [r for r in ranges if is_pow2(r)]
xticks_labels = []
for x in xticks_locations:
if x < 1024:
xticks_labels.append('%d kB' % x)
else:
xticks_labels.append('%d MB' % (x / 1024))
plt.xticks(xticks_locations, xticks_labels, rotation=-45)
plt.minorticks_off()
plt.xlim((xticks_locations[0], xticks_locations[-1]))
plt.ylabel("Latency (ns)")
plt.grid(True)
plt.show()
if __name__ == "__main__":
main()
|
bsd-3-clause
|
IssamLaradji/scikit-learn
|
sklearn/ensemble/tests/test_weight_boosting.py
|
9
|
15843
|
"""Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_classification_toy():
"""Check classification on a toy dataset."""
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
    """Check regression on a toy dataset."""
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
"""Check consistency on dataset iris."""
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
"""Check consistency on dataset boston house prices."""
clf = AdaBoostRegressor(random_state=0)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.85
def test_staged_predict():
"""Check staged predictions."""
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
"""Check that base trees can be grid-searched."""
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
    """Check picklability."""
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
"""Check variable importances."""
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
"""Test that it gives proper exception on deficient input."""
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
"""Test different base estimators."""
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(LinearRegression(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(LinearRegression())
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
"""Check classification with sparse input."""
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
        def fit(self, X, y, sample_weight=None):
            """Modification on fit carries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
return_indicator=True,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
"""Check regression with sparse input."""
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
        def fit(self, X, y, sample_weight=None):
            """Modification on fit carries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(probability=True),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
        dense_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(probability=True),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
if __name__ == "__main__":
import nose
nose.runmodule()
|
bsd-3-clause
|
soulmachine/scikit-learn
|
sklearn/setup.py
|
4
|
3091
|
import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('svm')
config.add_subpackage('datasets')
config.add_subpackage('datasets/tests')
config.add_subpackage('feature_extraction')
config.add_subpackage('feature_extraction/tests')
config.add_subpackage('cluster')
config.add_subpackage('cluster/tests')
config.add_subpackage('cluster/bicluster')
config.add_subpackage('cluster/bicluster/tests')
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('decomposition')
config.add_subpackage('decomposition/tests')
config.add_subpackage("ensemble")
config.add_subpackage("ensemble/tests")
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('utils')
config.add_subpackage('utils/tests')
config.add_subpackage('externals')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('neighbors')
config.add_subpackage('neural_network')
config.add_subpackage('preprocessing')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('semi_supervised')
config.add_subpackage("tree")
config.add_subpackage("tree/tests")
config.add_subpackage('metrics/tests')
config.add_subpackage('metrics/cluster')
config.add_subpackage('metrics/cluster/tests')
# add cython extension module for hmm
config.add_extension(
'_hmmc',
sources=['_hmmc.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
config.add_extension(
'_isotonic',
sources=['_isotonic.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
    # some libs need cblas; a fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
    # the following packages depend on cblas, so they have to be built
    # after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
bsd-3-clause
|
balazssimon/ml-playground
|
udemy/lazyprogrammer/ab-testing-python/client.py
|
1
|
1312
|
# From the course: Bayesian Machine Learning in Python: A/B Testing
# https://deeplearningcourses.com/c/bayesian-machine-learning-in-python-ab-testing
# https://www.udemy.com/bayesian-machine-learning-in-python-ab-testing
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
import requests
import numpy as np
import pandas as pd
from scipy import stats
# get data
df = pd.read_csv('advertisement_clicks.csv')
a = df[df['advertisement_id'] == 'A']
b = df[df['advertisement_id'] == 'B']
a = a['action'].as_matrix()
b = b['action'].as_matrix()
print("a.mean:", a.mean())
print("b.mean:", b.mean())
i = 0
j = 0
count = 0
while i < len(a) and j < len(b):
# quit when there's no data left for either ad
r = requests.get('http://localhost:8888/get_ad')
# print(r.content)
r = r.json()
if r['advertisement_id'] == 'A':
action = a[i]
i += 1
else:
action = b[j]
j += 1
if action == 1:
# only click the ad if our dataset determines that we should
requests.post(
'http://localhost:8888/click_ad',
data={'advertisement_id': r['advertisement_id']}
)
# log some stats
count += 1
if count % 50 == 0:
print("Seen %s ads, A: %s, B: %s" % (count, i, j))
|
apache-2.0
|
adarshp/ExoticHiggs
|
root_analysis/cut_n_count_analysis.py
|
1
|
2532
|
from math import sqrt
import pandas as pd
import subprocess as sp
df = pd.DataFrame()
for p in ['Signal', 'tt_fully_leptonic_including_taus',
'tt_semileptonic_including_taus', 'tttautau']:
df_temp = pd.read_table(p+'/cuts.txt')
df['Cut Name'] = df_temp['Cut Name']
df[p+ ' MC Events'] = df_temp['MC Events']
L = 3000 # Integrated luminosity in inverse femtobarns
tt_semi_xsection = 8271000.0000 # femtobarns
tt_full_xsection = 2490390.7403 # femtobarns
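# The tttautau cross section below is presumably also in femtobarns, since it
# is summed with the tt cross sections when forming the total background.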
tttautau_xsection = 95.71
mC = 1016.2776
mH = 725.09
xs_tbC = float(sp.check_output(['./cHtb_xsection', str(mC)]))
BP = pd.read_table('../benchmark_planes/BP_IIB_tb_1.5.txt', delim_whitespace=True)
br_C_HW = BP[BP['mC'] == mC][BP['mH'] == mH]['BR(C>HW)'].tolist()[0]
original_signal_xsection = br_C_HW*xs_tbC
df['tt_fully_leptonic_including_taus MC Events'].replace(0,3, inplace=True)
df['tt_semileptonic_including_taus MC Events'].replace(0,3, inplace=True)
df['tttautau MC Events'].replace(0,3, inplace=True)
# Renaming columns
df['Signal'] = df['Signal MC Events']
df['$tt_{full}$'] = df['tt_fully_leptonic_including_taus MC Events']
df['$tt_{semi}$'] = df['tt_semileptonic_including_taus MC Events']
df['$tt\\tau\\tau$'] = df['tttautau MC Events']
df['$\sigma_{Signal}$'] = original_signal_xsection*df['Signal MC Events']/df['Signal MC Events'][0]
df['$\sigma_{tt-full}$'] = tt_full_xsection*df['$tt_{full}$']/df['$tt_{full}$'][0]
df['$\sigma_{tt-semi}$'] = tt_semi_xsection*df['$tt_{semi}$']/df['$tt_{semi}$'][0]
df['$\sigma_{tt\\tau\\tau}$'] = tttautau_xsection*df['$tt\\tau\\tau$']/df['$tt\\tau\\tau$'][0]
df['$\sigma_{BG}$'] = sum([df['$\sigma_{tt-full}$'], df['$\sigma_{tt-semi}$'], df[r'$\sigma_{tt\tau\tau}$']])
df['S/B'] = df['$\sigma_{Signal}$']/df['$\sigma_{BG}$']
df['$S/\sqrt{B}$'] = df['S/B']*sqrt(L)
pd.set_option('display.width', 1000)
def myFormatter(x):
if x < 0.001: return '%.1e' % x
if x < 0.01: return '%.3f' % x
if x < 1: return '%.2f' % x
if x < 10: return '%.1f' % x
else: return '{:,.0f}'.format(x)
with open('cut_flow_latex_table_mc_events.tex', 'w') as f:
df.to_latex(f, escape=False,
columns = ['Cut Name', 'Signal',
'$tt_{full}$', '$tt_{semi}$', r'$tt\tau\tau$'],
index = False
)
with open('cut_flow_latex_table.tex', 'w') as f:
df.to_latex(f, escape = False, float_format = myFormatter,
columns = ['Cut Name', '$\sigma_{Signal}$',
'$\sigma_{tt-full}$','$\sigma_{tt-semi}$', r'$\sigma_{tt\tau\tau}$',
'S/B', '$S/\sqrt{B}$'], index = False)
|
gpl-3.0
|
rs2/pandas
|
pandas/tests/series/methods/test_reset_index.py
|
1
|
4636
|
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, RangeIndex, Series
import pandas._testing as tm
class TestResetIndex:
def test_reset_index(self):
df = tm.makeDataFrame()[:5]
ser = df.stack()
ser.index.names = ["hash", "category"]
ser.name = "value"
df = ser.reset_index()
assert "value" in df
df = ser.reset_index(name="value2")
assert "value2" in df
# check inplace
s = ser.reset_index(drop=True)
s2 = ser
return_value = s2.reset_index(drop=True, inplace=True)
assert return_value is None
tm.assert_series_equal(s, s2)
# level
index = MultiIndex(
levels=[["bar"], ["one", "two", "three"], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
)
s = Series(np.random.randn(6), index=index)
rs = s.reset_index(level=1)
assert len(rs.columns) == 2
rs = s.reset_index(level=[0, 2], drop=True)
tm.assert_index_equal(rs.index, Index(index.get_level_values(1)))
assert isinstance(rs, Series)
def test_reset_index_name(self):
s = Series([1, 2, 3], index=Index(range(3), name="x"))
assert s.reset_index().index.name is None
assert s.reset_index(drop=True).index.name is None
def test_reset_index_level(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])
for levels in ["A", "B"], [0, 1]:
# With MultiIndex
s = df.set_index(["A", "B"])["C"]
result = s.reset_index(level=levels[0])
tm.assert_frame_equal(result, df.set_index("B"))
result = s.reset_index(level=levels[:1])
tm.assert_frame_equal(result, df.set_index("B"))
result = s.reset_index(level=levels)
tm.assert_frame_equal(result, df)
result = df.set_index(["A", "B"]).reset_index(level=levels, drop=True)
tm.assert_frame_equal(result, df[["C"]])
with pytest.raises(KeyError, match="Level E "):
s.reset_index(level=["A", "E"])
# With single-level Index
s = df.set_index("A")["B"]
result = s.reset_index(level=levels[0])
tm.assert_frame_equal(result, df[["A", "B"]])
result = s.reset_index(level=levels[:1])
tm.assert_frame_equal(result, df[["A", "B"]])
result = s.reset_index(level=levels[0], drop=True)
tm.assert_series_equal(result, df["B"])
with pytest.raises(IndexError, match="Too many levels"):
s.reset_index(level=[0, 1, 2])
# Check that .reset_index([],drop=True) doesn't fail
result = Series(range(4)).reset_index([], drop=True)
expected = Series(range(4))
tm.assert_series_equal(result, expected)
def test_reset_index_range(self):
# GH 12071
s = Series(range(2), name="A", dtype="int64")
series_result = s.reset_index()
assert isinstance(series_result.index, RangeIndex)
series_expected = DataFrame(
[[0, 0], [1, 1]], columns=["index", "A"], index=RangeIndex(stop=2)
)
tm.assert_frame_equal(series_result, series_expected)
def test_reset_index_drop_errors(self):
# GH 20925
# KeyError raised for series index when passed level name is missing
s = Series(range(4))
with pytest.raises(KeyError, match="does not match index name"):
s.reset_index("wrong", drop=True)
with pytest.raises(KeyError, match="does not match index name"):
s.reset_index("wrong")
# KeyError raised for series when level to be dropped is missing
s = Series(range(4), index=MultiIndex.from_product([[1, 2]] * 2))
with pytest.raises(KeyError, match="not found"):
s.reset_index("wrong", drop=True)
@pytest.mark.parametrize(
"array, dtype",
[
(["a", "b"], object),
(
pd.period_range("12-1-2000", periods=2, freq="Q-DEC"),
pd.PeriodDtype(freq="Q-DEC"),
),
],
)
def test_reset_index_dtypes_on_empty_series_with_multiindex(array, dtype):
# GH 19602 - Preserve dtype on empty Series with MultiIndex
idx = MultiIndex.from_product([[0, 1], [0.5, 1.0], array])
result = Series(dtype=object, index=idx)[:0].reset_index().dtypes
expected = Series(
{"level_0": np.int64, "level_1": np.float64, "level_2": dtype, 0: object}
)
tm.assert_series_equal(result, expected)
|
bsd-3-clause
|
vybstat/scikit-learn
|
sklearn/feature_extraction/tests/test_dict_vectorizer.py
|
276
|
3790
|
# Authors: Lars Buitinck <[email protected]>
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
for sort in (True, False):
for iterable in (True, False):
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable
else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable
else D))
if sort:
assert_equal(v.feature_names_,
sorted(v.feature_names_))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert_in("empty", str(e))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
|
bsd-3-clause
|
russel1237/scikit-learn
|
examples/calibration/plot_calibration.py
|
225
|
4795
|
"""
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives a measure of confidence in the prediction. However, not all
classifiers provide well-calibrated probabilities, some being over-confident
while others being under-confident. Thus, a separate calibration of predicted
probabilities is often desirable as a postprocessing. This example illustrates
two different methods for this calibration and evaluates the quality of the
returned probabilities using the Brier score
(see http://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probability using a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is able
to provide a probability calibration that returns probabilities close to the
expected 0.5 for most of the samples belonging to the middle cluster with
heterogeneous labels. This results in a significantly improved Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <[email protected]>
# Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.cross_validation import train_test_split
n_samples = 50000
n_bins = 3 # use 3 bins for calibration_curve as we have 3 clusters here
# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
centers=centers, shuffle=False, random_state=42)
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])
# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)
# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train) # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]
print("Brier scores: (the smaller the better)")
clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)
clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)
clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)
###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
this_X = X_train[y_train == this_y]
this_sw = sw_train[y_train == this_y]
plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")
plt.figure()
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
y_test[order].reshape(25, -1).mean(1),
'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
"(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
|
bsd-3-clause
|
adamgreenhall/scikit-learn
|
examples/decomposition/plot_kernel_pca.py
|
353
|
2011
|
"""
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
|
bsd-3-clause
|
UltronAI/Deep-Learning
|
Pattern-Recognition/hw2-Feature-Selection/skfeature/example/test_ls_l21.py
|
1
|
1781
|
import scipy.io
from sklearn import svm
from sklearn import cross_validation
from sklearn.metrics import accuracy_score
from skfeature.utility.sparse_learning import *
from skfeature.function.sparse_learning_based import ls_l21
def main():
# load data
mat = scipy.io.loadmat('../data/COIL20.mat')
X = mat['X'] # data
X = X.astype(float)
y = mat['Y'] # label
y = y[:, 0]
Y = construct_label_matrix_pan(y)
n_samples, n_features = X.shape # number of samples and number of features
# split data into 10 folds
ss = cross_validation.KFold(n_samples, n_folds=10, shuffle=True)
# perform evaluation on classification task
num_fea = 100 # number of selected features
clf = svm.LinearSVC() # linear SVM
correct = 0
for train, test in ss:
# obtain the feature weight matrix
Weight, obj, value_gamma = ls_l21.proximal_gradient_descent(X[train], Y[train], 0.1, verbose=False)
        # rank the features according to the feature weight matrix
idx = feature_ranking(Weight)
# obtain the dataset on the selected features
selected_features = X[:, idx[0:num_fea]]
# train a classification model with the selected features on the training dataset
clf.fit(selected_features[train], y[train])
# predict the class labels of test data
y_predict = clf.predict(selected_features[test])
# obtain the classification accuracy on the test data
acc = accuracy_score(y[test], y_predict)
correct = correct + acc
# output the average classification accuracy over all 10 folds
    print('Accuracy: {0}'.format(float(correct)/10))
if __name__ == '__main__':
main()
|
mit
|
julienr/vispy
|
vispy/testing/__init__.py
|
21
|
2415
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Testing
=======
This module provides functions useful for running tests in vispy.
Tests can be run in a few ways:
* From Python, you can import ``vispy`` and do ``vispy.test()``.
* From the source root, you can do ``make test`` which wraps to
a call to ``python make test``.
There are various different testing "modes", including:
* "full": run all tests.
* any backend name (e.g., "glfw"): run application/GL tests using a
specific backend.
* "nobackend": run tests that do not require a backend.
* "examples": run repo examples to check for errors and warnings.
* "flake": check style errors.
Examples get automatically tested unless they have a special comment toward
the top ``# vispy: testskip``. Examples that should be tested should be
formatted so that 1) a ``Canvas`` class is defined, or a ``canvas`` class
is instantiated; and 2) the ``app.run()`` call is protected by a check
if ``__name__ == "__main__"``. This makes it so that the event loop is not
started when running examples in the test suite -- the test suite instead
manually updates the canvas (using ``app.process_events()``) for under one
second to ensure that things like timer events are processed.
For examples on how to test various bits of functionality (e.g., application
functionality, or drawing things with OpenGL), it's best to look at existing
examples in the test suite.
The code base gets automatically tested by Travis-CI (Linux) and AppVeyor
(Windows) on Python 2.6, 2.7, 3.4. There are multiple testing modes that
use e.g. full dependencies, minimal dependencies, etc. See ``.travis.yml``
to determine what automatic tests are run.
"""
from ._testing import (SkipTest, requires_application, requires_ipython, # noqa
requires_img_lib, # noqa
has_backend, requires_pyopengl, # noqa
requires_scipy, has_matplotlib, # noqa
save_testing_image, TestingCanvas, has_pyopengl, # noqa
run_tests_if_main,
assert_is, assert_in, assert_not_in, assert_equal,
assert_not_equal, assert_raises, assert_true, # noqa
raises) # noqa
from ._runners import test # noqa
|
bsd-3-clause
|
anurag313/scikit-learn
|
sklearn/tree/export.py
|
78
|
15814
|
"""
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Trevor Stephens <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from ..externals import six
from . import _criterion
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
def export_graphviz(decision_tree, out_file="tree.dot", max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default="tree.dot")
Handle or name of the output file.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def get_color(value):
# Find the appropriate color & intensity for a node
if colors['bounds'] is None:
# Classification tree
color = list(colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
alpha = int(255 * (sorted_values[0] - sorted_values[1]) /
(1 - sorted_values[1]))
else:
# Regression tree or multi-output
color = list(colors['rgb'][0])
alpha = int(255 * ((value - colors['bounds'][0]) /
(colors['bounds'][1] - colors['bounds'][0])))
# Return html color code in #RRGGBBAA format
color.append(alpha)
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
def node_to_str(tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (label == 'root' and node_id == 0) or label == 'all'
# PostScript compatibility for special characters
if special_characters:
characters = ['#', '<SUB>', '</SUB>', '≤', '<br/>', '>']
node_string = '<'
else:
characters = ['#', '[', ']', '<=', '\\n', '"']
node_string = '"'
# Write node ID
if node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id], 4),
characters[4])
# Write impurity
if impurity:
if isinstance(criterion, _criterion.FriedmanMSE):
criterion = "friedman_mse"
elif not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], 4)) +
characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, 4)
elif proportion:
# Classification
value_text = np.around(value, 2)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, 4)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if class_names is not True:
class_name = class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string[-2:] == '\\n':
node_string = node_string[:-2]
if node_string[-5:] == '<br/>':
node_string = node_string[:-5]
return node_string + characters[5]
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
ranks['leaves'].append(str(node_id))
elif str(depth) not in ranks:
ranks[str(depth)] = [str(node_id)]
else:
ranks[str(depth)].append(str(node_id))
out_file.write('%d [label=%s'
% (node_id,
node_to_str(tree, node_id, criterion)))
if filled:
# Fetch appropriate color for node
if 'rgb' not in colors:
# Initialize colors and bounds if required
colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif tree.n_classes[0] == 1:
# Find max and min values in leaf nodes for regression
colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
out_file.write(', fillcolor="%s"' % get_color(node_val))
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((rotate - .5) * -2)
out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_file.write('%d, headlabel="True"]' % angles[0])
else:
out_file.write('%d, headlabel="False"]' % angles[1])
out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
ranks['leaves'].append(str(node_id))
out_file.write('%d [label="(...)"' % node_id)
if filled:
# color cropped nodes grey
out_file.write(', fillcolor="#C0C0C0"')
            out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
# The depth of each node for plotting with 'leaf' option
ranks = {'leaves': []}
# The colors to render each node with
colors = {'bounds': None}
out_file.write('digraph Tree {\n')
# Specify node aesthetics
out_file.write('node [shape=box')
rounded_filled = []
if filled:
rounded_filled.append('filled')
if rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
out_file.write(', style="%s", color="black"'
% ", ".join(rounded_filled))
if rounded:
out_file.write(', fontname=helvetica')
out_file.write('] ;\n')
# Specify graph & edge aesthetics
if leaves_parallel:
out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
if rounded:
out_file.write('edge [fontname=helvetica] ;\n')
if rotate:
out_file.write('rankdir=LR ;\n')
# Now recurse the tree and add node & edge attributes
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
# If required, draw leaf nodes at same depth as each other
if leaves_parallel:
for rank in sorted(ranks):
out_file.write("{rank=same ; " +
"; ".join(r for r in ranks[rank]) + "} ;\n")
out_file.write("}")
finally:
if own_file:
out_file.close()
|
bsd-3-clause
|
rahul-c1/scikit-learn
|
examples/datasets/plot_random_dataset.py
|
348
|
2254
|
"""
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
|
bsd-3-clause
|
bhargav/scikit-learn
|
sklearn/semi_supervised/label_propagation.py
|
14
|
15965
|
# coding=utf8
"""
Label propagation in the context of this module refers to a set of
semi-supervised classification algorithms. At a high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice, but running them can be
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
  implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClassifierMixin
from ..externals import six
from ..metrics.pairwise import rbf_kernel
from ..neighbors.unsupervised import NearestNeighbors
from ..utils.extmath import safe_sparse_dot
from ..utils.graph import graph_laplacian
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_X_y, check_is_fitted, check_array
# Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3, n_jobs=1):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
self.n_jobs = n_jobs
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors,
n_jobs=self.n_jobs).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
X_2d = check_array(X, accept_sparse=['csc', 'csr', 'coo', 'dok',
'bsr', 'lil', 'dia'])
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
check_classification_targets(y)
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
    This model is similar to the basic Label Propagation algorithm,
but uses affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3, n_jobs=1):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol,
n_jobs=n_jobs)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
|
bsd-3-clause
|
xgcm/xhistogram
|
xhistogram/test/test_core.py
|
1
|
12013
|
import numpy as np
import pandas as pd
from itertools import combinations
import dask.array as dsa
from ..core import (
histogram,
_ensure_correctly_formatted_bins,
_ensure_correctly_formatted_range,
)
from .fixtures import empty_dask_array
import pytest
bins_int = 10
bins_str = "auto"
bins_arr = np.linspace(-4, 4, 10)
range_ = (0, 1)
@pytest.mark.parametrize("density", [False, True])
@pytest.mark.parametrize("block_size", [None, 1, 2])
@pytest.mark.parametrize("axis", [1, None])
@pytest.mark.parametrize("bins", [10, np.linspace(-4, 4, 10), "auto"])
@pytest.mark.parametrize("range_", [None, (-4, 4)])
@pytest.mark.parametrize("add_nans", [False, True])
def test_histogram_results_1d(block_size, density, axis, bins, range_, add_nans):
nrows, ncols = 5, 20
# Setting the random seed here prevents np.testing.assert_allclose
    # from failing below. We should investigate this further.
np.random.seed(2)
data = np.random.randn(nrows, ncols)
if add_nans:
N_nans = 20
data.ravel()[np.random.choice(data.size, N_nans, replace=False)] = np.nan
bins = np.linspace(-4, 4, 10)
h, bin_edges = histogram(
data, bins=bins, range=range_, axis=axis, block_size=block_size, density=density
)
expected_shape = (
(nrows, len(bin_edges[0]) - 1) if axis == 1 else (len(bin_edges[0]) - 1,)
)
assert h.shape == expected_shape
# make sure we get the same thing as numpy.histogram
if axis:
bins_np = np.histogram_bin_edges(
data, bins=bins, range=range_
) # Use same bins for all slices below
expected = np.stack(
[
np.histogram(data[i], bins=bins_np, range=range_, density=density)[0]
for i in range(nrows)
]
)
else:
expected = np.histogram(data, bins=bins, range=range_, density=density)[0]
np.testing.assert_allclose(h, expected)
if density:
widths = np.diff(bins)
integral = np.sum(h * widths, axis)
np.testing.assert_allclose(integral, 1.0)
@pytest.mark.parametrize("block_size", [None, 1, 2])
def test_histogram_results_1d_weighted(block_size):
nrows, ncols = 5, 20
data = np.random.randn(nrows, ncols)
bins = np.linspace(-4, 4, 10)
h, _ = histogram(data, bins=bins, axis=1, block_size=block_size)
weights = 2 * np.ones_like(data)
h_w, _ = histogram(data, bins=bins, axis=1, weights=weights, block_size=block_size)
np.testing.assert_array_equal(2 * h, h_w)
# @pytest.mark.skip(reason="Weight broadcasting on numpy arrays is not yet implemented")
@pytest.mark.parametrize("block_size", [None, 1, 2, "auto"])
def test_histogram_results_1d_weighted_broadcasting(block_size):
nrows, ncols = 5, 20
data = np.random.randn(nrows, ncols)
bins = np.linspace(-4, 4, 10)
h, _ = histogram(data, bins=bins, axis=1, block_size=block_size)
weights = 2 * np.ones((1, ncols))
h_w, _ = histogram(data, bins=bins, axis=1, weights=weights, block_size=block_size)
np.testing.assert_array_equal(2 * h, h_w)
@pytest.mark.parametrize("block_size", [None, 1, 2])
def test_histogram_right_edge(block_size):
"""Test that last bin is both left- and right-edge inclusive as it
is for numpy.histogram
"""
nrows, ncols = 5, 20
data = np.ones((nrows, ncols))
bins = np.array([0, 0.5, 1]) # All data at rightmost edge
h, _ = histogram(data, bins=bins, axis=1, block_size=block_size)
assert h.shape == (nrows, len(bins) - 1)
# make sure we get the same thing as histogram (all data in the last bin)
hist, _ = np.histogram(data, bins=bins)
np.testing.assert_array_equal(hist, h.sum(axis=0))
# now try with no axis
h_na, _ = histogram(data, bins=bins, block_size=block_size)
np.testing.assert_array_equal(hist, h_na)
def test_histogram_results_2d():
nrows, ncols = 5, 20
data_a = np.random.randn(nrows, ncols)
data_b = np.random.randn(nrows, ncols)
nbins_a = 9
bins_a = np.linspace(-4, 4, nbins_a + 1)
nbins_b = 10
bins_b = np.linspace(-4, 4, nbins_b + 1)
h, _ = histogram(data_a, data_b, bins=[bins_a, bins_b])
assert h.shape == (nbins_a, nbins_b)
hist, _, _ = np.histogram2d(data_a.ravel(), data_b.ravel(), bins=[bins_a, bins_b])
np.testing.assert_array_equal(hist, h)
@pytest.mark.parametrize("dask", [False, True])
def test_histogram_results_2d_broadcasting(dask):
nrows, ncols = 5, 20
data_a = np.random.randn(ncols)
data_b = np.random.randn(nrows, ncols)
nbins_a = 9
bins_a = np.linspace(-4, 4, nbins_a + 1)
nbins_b = 10
bins_b = np.linspace(-4, 4, nbins_b + 1)
if dask:
test_data_a = dsa.from_array(data_a, chunks=3)
test_data_b = dsa.from_array(data_b, chunks=(2, 7))
else:
test_data_a = data_a
test_data_b = data_b
h, _ = histogram(test_data_a, test_data_b, bins=[bins_a, bins_b])
assert h.shape == (nbins_a, nbins_b)
hist, _, _ = np.histogram2d(
np.broadcast_to(data_a, data_b.shape).ravel(),
data_b.ravel(),
bins=[bins_a, bins_b],
)
np.testing.assert_array_equal(hist, h)
@pytest.mark.parametrize("add_nans", [False, True])
def test_histogram_results_2d_density(add_nans):
nrows, ncols = 5, 20
data_a = np.random.randn(nrows, ncols)
data_b = np.random.randn(nrows, ncols)
if add_nans:
N_nans = 20
data_a.ravel()[np.random.choice(data_a.size, N_nans, replace=False)] = np.nan
data_b.ravel()[np.random.choice(data_b.size, N_nans, replace=False)] = np.nan
nbins_a = 9
bins_a = np.linspace(-4, 4, nbins_a + 1)
nbins_b = 10
bins_b = np.linspace(-4, 4, nbins_b + 1)
h, _ = histogram(data_a, data_b, bins=[bins_a, bins_b], density=True)
assert h.shape == (nbins_a, nbins_b)
hist, _, _ = np.histogram2d(
data_a.ravel(), data_b.ravel(), bins=[bins_a, bins_b], density=True
)
np.testing.assert_allclose(hist, h)
# check integral is 1
widths_a = np.diff(bins_a)
widths_b = np.diff(bins_b)
areas = np.outer(widths_a, widths_b)
integral = np.sum(hist * areas)
np.testing.assert_allclose(integral, 1.0)
@pytest.mark.parametrize("add_nans", [False, True])
def test_histogram_results_3d_density(add_nans):
nrows, ncols = 5, 20
data_a = np.random.randn(nrows, ncols)
data_b = np.random.randn(nrows, ncols)
data_c = np.random.randn(nrows, ncols)
if add_nans:
N_nans = 20
data_a.ravel()[np.random.choice(data_a.size, N_nans, replace=False)] = np.nan
data_b.ravel()[np.random.choice(data_b.size, N_nans, replace=False)] = np.nan
data_c.ravel()[np.random.choice(data_c.size, N_nans, replace=False)] = np.nan
nbins_a = 9
bins_a = np.linspace(-4, 4, nbins_a + 1)
nbins_b = 10
bins_b = np.linspace(-4, 4, nbins_b + 1)
nbins_c = 9
bins_c = np.linspace(-4, 4, nbins_c + 1)
h, _ = histogram(
data_a, data_b, data_c, bins=[bins_a, bins_b, bins_c], density=True
)
assert h.shape == (nbins_a, nbins_b, nbins_c)
hist, _ = np.histogramdd(
(data_a.ravel(), data_b.ravel(), data_c.ravel()),
bins=[bins_a, bins_b, bins_c],
density=True,
)
np.testing.assert_allclose(hist, h)
# check integral is 1
widths_a = np.diff(bins_a)
widths_b = np.diff(bins_b)
widths_c = np.diff(bins_c)
areas = np.einsum("i,j,k", widths_a, widths_b, widths_c)
integral = np.sum(hist * areas)
np.testing.assert_allclose(integral, 1.0)
@pytest.mark.parametrize("block_size", [None, 5, "auto"])
@pytest.mark.parametrize("use_dask", [False, True])
def test_histogram_shape(use_dask, block_size):
"""These tests just verify that arrays with the right shape come out.
They don't verify correctness."""
shape = 10, 15, 12, 20
if use_dask:
b = empty_dask_array(shape, chunks=(1,) + shape[1:])
else:
b = np.random.randn(*shape)
bins = np.linspace(-4, 4, 27)
# no axis
c, _ = histogram(b, bins=bins, block_size=block_size)
assert c.shape == (len(bins) - 1,)
# same thing
for axis in [(0, 1, 2, 3), (0, 1, 3, 2), (3, 2, 1, 0), (3, 2, 0, 1)]:
c, _ = histogram(b, bins=bins, axis=axis)
assert c.shape == (len(bins) - 1,)
if use_dask:
assert isinstance(c, dsa.Array)
# scalar axis (check positive and negative)
for axis in list(range(4)) + list(range(-1, -5, -1)):
c, _ = histogram(b, bins=bins, axis=axis, block_size=block_size)
shape = list(b.shape)
del shape[axis]
expected_shape = tuple(shape) + (len(bins) - 1,)
assert c.shape == expected_shape
if use_dask:
assert isinstance(c, dsa.Array)
# two axes
for i, j in combinations(range(4), 2):
axis = (i, j)
c, _ = histogram(b, bins=bins, axis=axis, block_size=block_size)
shape = list(b.shape)
partial_shape = [shape[k] for k in range(b.ndim) if k not in axis]
expected_shape = tuple(partial_shape) + (len(bins) - 1,)
assert c.shape == expected_shape
if use_dask:
assert isinstance(c, dsa.Array)
def test_histogram_dask():
"""Test that fails with dask arrays and inappropriate bins"""
shape = 10, 15, 12, 20
b = empty_dask_array(shape, chunks=(1,) + shape[1:])
histogram(b, bins=bins_arr) # Should work when bins is all numpy arrays
with pytest.raises(TypeError): # Should fail otherwise
histogram(b, bins=bins_int)
histogram(b, bins=bins_str)
histogram(b, b, bins=[bins_arr, bins_int])
@pytest.mark.parametrize(
"in_out",
[
(bins_int, 1, [bins_int]), # ( bins_in, n_args, bins_out )
(bins_str, 1, [bins_str]),
(bins_arr, 1, [bins_arr]),
([bins_int], 1, [bins_int]),
(bins_int, 2, 2 * [bins_int]),
(bins_str, 2, 2 * [bins_str]),
(bins_arr, 2, 2 * [bins_arr]),
([bins_int, bins_str, bins_arr], 3, [bins_int, bins_str, bins_arr]),
([bins_arr], 2, None),
(None, 1, None),
([bins_arr, bins_arr], 1, None),
],
)
def test_ensure_correctly_formatted_bins(in_out):
"""Test the helper function _ensure_correctly_formatted_bins"""
bins_in, n, bins_expected = in_out
if bins_expected is not None:
bins = _ensure_correctly_formatted_bins(bins_in, n)
assert bins == bins_expected
else:
with pytest.raises((ValueError, TypeError)):
_ensure_correctly_formatted_bins(bins_in, n)
@pytest.mark.parametrize(
"in_out",
[
(range_, 1, [range_]), # ( range_in, n_args, range_out )
(range_, 2, [range_, range_]),
([range_, range_], 2, [range_, range_]),
([(range_[0],)], 1, None),
([range_], 2, None),
([range_, range_], 1, None),
],
)
def test_ensure_correctly_formatted_range(in_out):
"""Test the helper function _ensure_correctly_formatted_range"""
range_in, n, range_expected = in_out
if range_expected is not None:
range_ = _ensure_correctly_formatted_range(range_in, n)
assert range_ == range_expected
else:
with pytest.raises(ValueError):
_ensure_correctly_formatted_range(range_in, n)
@pytest.mark.parametrize("block_size", [None, 1, 2])
@pytest.mark.parametrize("use_dask", [False, True])
def test_histogram_results_datetime(use_dask, block_size):
"""Test computing histogram of datetime objects"""
data = pd.date_range(start="2000-06-01", periods=5)
if use_dask:
data = dsa.asarray(data, chunks=(5,))
# everything should be in the second bin (index 1)
bins = np.array(
[
np.datetime64("1999-01-01"),
np.datetime64("2000-01-01"),
np.datetime64("2001-01-01"),
]
)
h = histogram(data, bins=bins, block_size=block_size)[0]
expected = np.histogram(data, bins=bins)[0]
np.testing.assert_allclose(h, expected)
|
mit
|
mantidproject/mantid
|
qt/python/mantidqt/plotting/test/test_figuretype.py
|
3
|
2627
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
#
#
from __future__ import absolute_import
# std imports
from unittest import TestCase, main
# thirdparty imports
import matplotlib
matplotlib.use('AGG') # noqa
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
# local imports
from mantidqt.plotting.figuretype import figure_type, FigureType
class FigureTypeTest(TestCase):
def test_figure_type_empty_figure_returns_empty(self):
self.assertEqual(FigureType.Empty, figure_type(plt.figure()))
def test_subplot_with_multiple_plots_returns_other(self):
ax = plt.subplot(221)
self.assertEqual(FigureType.Other, figure_type(ax.figure))
def test_line_plot_returns_line(self):
ax = plt.subplot(111)
ax.plot([1])
self.assertEqual(FigureType.Line, figure_type(ax.figure))
def test_error_plot_returns_error(self):
ax = plt.subplot(111)
ax.errorbar([1], [1], yerr=[0.01])
self.assertEqual(FigureType.Errorbar, figure_type(ax.figure))
def test_image_plot_returns_image(self):
ax = plt.subplot(111)
ax.imshow([[1], [1]])
self.assertEqual(FigureType.Image, figure_type(ax.figure))
def test_surface_plot_returns_surface(self):
a = np.array([[1]])
fig, ax = plt.subplots(subplot_kw={'projection': 'mantid3d'})
ax.plot_surface(a, a, a)
self.assertEqual(FigureType.Surface, figure_type(ax.figure))
def test_wireframe_plot_returns_wireframe(self):
a = np.array([[1]])
ax = plt.subplot(111, projection='3d')
ax.plot_wireframe(a, a, a)
self.assertEqual(FigureType.Wireframe, figure_type(ax.figure))
def test_contour_plot_returns_contour(self):
ax = plt.subplot(111)
ax.imshow([[1], [1]])
ax.contour([[1, 1], [1, 1]])
self.assertEqual(FigureType.Contour, figure_type(ax.figure))
def test_mesh_plot_returns_mesh(self):
a = np.array([[[1, 1, 1], [2, 2, 2], [3, 3, 3]]])
mesh_polygon = Poly3DCollection(a)
fig, ax = plt.subplots(subplot_kw={'projection': 'mantid3d'})
ax.add_collection3d(mesh_polygon)
self.assertEqual(FigureType.Mesh, figure_type(ax.figure))
if __name__ == '__main__':
main()
|
gpl-3.0
|
TheCentralLimit/TopSecret
|
src/classifier.py
|
1
|
4248
|
"""
"""
from __future__ import division, print_function
import numpy as np
from matplotlib import pyplot as plt
from sklearn.svm import LinearSVC
from sklearn import grid_search
from astroML.utils import completeness_contamination
from os import path
import gw
def classifier(m_1, m_2, M_c, s,
ax_pdf, ax_data, ax_log_pdf, ax_log_data,
output_directory):
M_c_front = M_c[:len(s)//2]
M_c_end = M_c[len(s)//2:]
s_front = s [:len(s)//2]
s_end = s [len(s)//2:]
index_pos = (s==1)
index_neg = (s==0)
M_c_em = M_c[index_pos]
M_c_not_em = M_c[index_neg]
Mcem_max = max(M_c_em)
Mcnotem_min = min(M_c_not_em)
    # Training a classifier with half the data set
train = M_c[:len(M_c)//2]
index_pos_half = index_pos[:len(s)//2]
index_neg_half = index_neg[:len(s)//2]
Mcem_half = train[index_pos_half]
Mcnotem_half = train[index_neg_half]
    # Calculating the dividing line (using half the data)
    Mcem_half_max = max(Mcem_half)
    Mcnotem_half_min = min(Mcnotem_half)
    distance = abs(Mcem_half_max - Mcnotem_half_min)/2.
    line_half = Mcem_half_max + distance
    # Print the max M_c of the EM CP events, the min M_c of the others,
    # and the dividing line trained on half the data
print("It works" if Mcem_max < line_half < Mcnotem_min else "It doesn't work")
print("The Minimum M_c for the Others is: ", Mcnotem_min)
print("The Maximum M_c for the EM CP is: ", Mcem_max)
print("The Dividing line trained by half the data is: ", line_half)
for ax in [ax_pdf, ax_data, ax_log_pdf, ax_log_data]:
ax.axvline(line_half, color="black", linestyle="--")
fig_train, ax = plt.subplots()
ax.scatter(M_c_front[s_front],
np.random.uniform(0.0, 0.5, size=np.shape(M_c_front[s_front])),
edgecolor="red", facecolor="none", marker="s")
ax.scatter(M_c_end[s_end],
np.random.uniform(0.5, 1.0, size=np.shape(M_c_end[s_end])),
edgecolor="red", facecolor="red", marker="s")
ax.scatter(M_c_front[~s_front],
np.random.uniform(0.0, 0.5, size=np.shape(M_c_front[~s_front])),
edgecolor="blue", facecolor="none", marker="o")
ax.scatter(M_c_end[~s_end],
np.random.uniform(0.5, 1.0, size=np.shape(M_c_end[~s_end])),
edgecolor="blue", facecolor="blue", marker="o")
ax.axvline(line_half, color="black", linestyle="--")
ax.set_xlabel(r"$\mathcal{M}_c\ [M_\odot]$")
ax.semilogx()
ax.yaxis.set_ticklabels([])
fig_train.savefig(path.join(output_directory, "classifier_comparison.pdf"))
fig_2d, ax_2d = plt.subplots()
m_1_smooth = np.logspace(0, 1.3, 1000)
ax_2d.scatter(m_1[s], m_2[s],
color="red", marker="s")
ax_2d.scatter(m_1[~s], m_2[~s],
color="blue", marker="o")
ax_2d.plot(m_1_smooth, gw.m_2(m_1_smooth, line_half), "k--")
ax_2d.set_xlabel(r"$m_1\ [M_\odot]$")
ax_2d.set_ylabel(r"$m_2\ [M_\odot]$")
ax_2d.loglog()
fig_2d.savefig(path.join(output_directory, "mass-distribution.pdf"))
m1_m2 = np.column_stack((m_1,m_2))
train2 = np.log10(m1_m2[:len(m1_m2)//2])
clf = LinearSVC(C=100,class_weight='balanced').fit(train2, index_pos_half)
index_pos_half_pred = clf.predict(train2)
completeness2, contamination2 = completeness_contamination(index_pos_half_pred, index_pos_half)
print("2D completeness: ", completeness2)
print("2D contamination: ", contamination2)
xx, yy = np.meshgrid(np.logspace(np.log10(m_1.min()), np.log10(m_1.max()), 500,endpoint=True),
np.logspace(np.log10(m_2.min()), np.log10(m_2.max()), 500,endpoint=True))
Z = clf.predict(np.log10(np.c_[xx.ravel(), yy.ravel()]))
Z = Z.reshape(xx.shape)
print(np.unique(s))
fig2d, ax2d = plt.subplots()
ax2d.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8,antialiased=False,
extend='neither')
ax2d.scatter(m_1, m_2, c=s, cmap=plt.cm.Paired)
ax2d.set_xlabel('m$_1$')
ax2d.set_ylabel('m$_2$')
ax2d.loglog()
ax2d.set_xlim(m_1.min(), m_1.max())
ax2d.set_ylim(m_2.min(), m_2.max())
fig2d.savefig(path.join(output_directory, "classifier-2D.pdf"))
|
mit
|
massmutual/scikit-learn
|
sklearn/ensemble/tests/test_weight_boosting.py
|
83
|
17276
|
"""Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.base import BaseEstimator
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator(object):
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert_true(np.isfinite(samme_proba).all())
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
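# (Reference sketch, hedged: the SAMME.R transformation that `_samme_proba` is
# expected to apply, following Zhu et al., "Multi-class AdaBoost". Assuming the
# probabilities have been clipped so the log never sees zero or negative values:
#
#     log_p = np.log(probs)
#     h = (n_classes - 1) * (log_p - log_p.mean(axis=1, keepdims=True))
#
# Within each row this is a monotone transform of `probs`, which is why the
# argmin/argmax ordering checks above are meaningful.)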
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
# Check classification on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
clf = AdaBoostRegressor(random_state=0)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.85
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
# Check pickability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(LogisticRegression(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sample_weight_adaboost_regressor():
"""
AdaBoostRegressor should work even when the base estimator does not support
sample_weight: the random weighted sampling is done internally in the _boost
method of AdaBoostRegressor.
"""
class DummyEstimator(BaseEstimator):
def fit(self, X, y):
pass
def predict(self, X):
return np.zeros(X.shape[0])
boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
boost.fit(X, y_regr)
assert_equal(len(boost.estimator_weights_), len(boost.estimator_errors_))
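# (Illustrative sketch of the internal weighted resampling mentioned in the
# docstring above -- an assumption about the mechanism as a weighted bootstrap,
# not sklearn's exact code:
#
#     cdf = np.cumsum(sample_weight) / np.sum(sample_weight)
#     idx = np.searchsorted(cdf, rng.random_sample(len(X)))
#     estimator.fit(X[idx], y[idx])
#
# which is why a base estimator whose fit ignores sample_weight, such as
# DummyEstimator above, can still be boosted.)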
|
bsd-3-clause
|
abhishekgahlot/scikit-learn
|
examples/classification/plot_classifier_comparison.py
|
181
|
4699
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.lda import LDA
from sklearn.qda import QDA
h = .02 # step size in the mesh
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Decision Tree",
"Random Forest", "AdaBoost", "Naive Bayes", "LDA", "QDA"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
AdaBoostClassifier(),
GaussianNB(),
LDA(),
QDA()]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds in datasets:
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
figure.subplots_adjust(left=.02, right=.98)
plt.show()
|
bsd-3-clause
|
dandanvidi/capacity-usage
|
scripts/LTEE_to_cu.py
|
3
|
2658
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 3 14:09:58 2016
@author: dan
"""
import re, csv
import pandas as pd
import numpy as np
from collections import defaultdict
from capacity_usage import CAPACITY_USAGE
flux = pd.DataFrame.from_csv("../data/mmol_gCDW_h.csv")
abundance = pd.DataFrame.from_csv("../data/g_gCDW.csv")
cu = CAPACITY_USAGE(flux, abundance)
lenski = pd.DataFrame.from_csv("../data/Lenski_genes_[Pelosi_etal_2006].csv")
new_index = [re.split(r"[()]", s)[1] for s in lenski.index]
new_Q1 = [float(re.split(r" ", s)[0]) for s in lenski.Quantitatione1]
new_Q2 = [float(re.split(r" ", s)[0]) for s in lenski.Quantitatione2]
lenski.index = new_index
lenski.Quantitatione1 = new_Q1
lenski.Quantitatione2 = new_Q2
lenski = lenski[~lenski.index.duplicated(keep='first')]
#lenski.loc[x, 'Quantitatione1'] = 1 / lenski.loc[x, 'Quantitatione1']
#lenski.loc[x, 'Quantitatione2'] = 1 / lenski.loc[x, 'Quantitatione2']
#lenski.loc[:,'lower in'] = ['Evol']*len(lenski)
#%%
for g in lenski.index:
with open('../data/all_ecoli_genes.txt', 'rb') as csvfile:
f = csv.reader(csvfile)
for i, s in enumerate(f):
if g in s[0]:
lenski.loc[g, 'bnumber'] = s[0][:5]
break
#%%
out = pd.DataFrame(index=cu.rxns.keys(), columns=lenski.columns)
for g in lenski.index:
b = lenski.bnumber[g]
try:
b = cu.model.genes.get_by_id(b)
reactions = map(str, b.reactions)
for r in reactions:
out.loc[r] = lenski.loc[g]
except KeyError:
continue
out.dropna(subset=['Quantitatione1'], inplace=True)
out = out[['Quantitatione1', 'Quantitatione2', 'lower in']]
x = out.loc[:,['Quantitatione1', 'Quantitatione2']]
out.loc[:, 'Quantitatione'] = x.mean(axis=1)
out = out[['lower in', 'Quantitatione']]
out.loc[:, 'CU on glucose'] = (1-cu.CU.loc[out.index, 'glucose']) * cu.E['glucose'] + 1e-3
out.loc[:, 'Evol/Anc'] = out.Quantitatione
for x in out.index:
if out.loc[x, 'lower in'] == 'Anc':
if out.loc[x, 'Evol/Anc'] < 1:
out.loc[x, 'Evol/Anc'] = 1 / out.loc[x, 'Evol/Anc']
if out.loc[x, 'lower in'] == 'Evo':
if out.loc[x, 'Evol/Anc'] > 1:
out.loc[x, 'Evol/Anc'] = 1 / out.loc[x, 'Evol/Anc']
#%%
import matplotlib.pyplot as plt
plt.scatter(out['CU on glucose'], out['Evol/Anc'], edgecolor='')
plt.axhline(y=1, color='r')
#plt.xlim(-0.1,1.1)
plt.ylim(-0.1,6)
plt.xscale('log')
plt.ylabel('Evolved /Ancestor')
plt.xlabel('capacity usage')
out.replace(np.inf, np.nan, inplace=True)
x = out[out['CU on glucose']<0.1].index
print out.loc[x].mean()
cu.E.loc['MALS']
x = out[out['Evol/Anc']<1]
print len(x)
|
mit
|
illoyd/algo
|
robinhood.py
|
1
|
16355
|
import os
import re
import requests
import pandas as pd
import datetime
import dateutil
import logging
from collections import deque
import simpleapi
import resourceful
import helper
##
# Base class for exceptions in this module
class Error(Exception):
def __init__(self, original: Exception = None, *args: object) -> None:
super().__init__(*args)
self.original = original
##
# Base order error class
class OrderError(Error):
def __init__(self, message: str, *args: object):
super().__init__(*args)
self.message = message
##
class TooManySharesError(OrderError):
def __init__(self, symbol: str, quantity: float, *args: object):
super().__init__(*args)
self.symbol = symbol
self.quantity = float(quantity)
class BuyingTooManySharesError(TooManySharesError):
pass
##
class SellingTooManySharesError(TooManySharesError):
pass
##
# The Robinhood interface, built from the ground up sadly!
class Client(object):
def __init__(self, username=None, password=None, account_id=None, token=None):
self.username = None
# Coalesce to environment defaults
username = helper.coalesce(username, os.environ.get('ROBINHOOD_USERNAME'))
password = helper.coalesce(password, os.environ.get('ROBINHOOD_PASSWORD'))
account_id = helper.coalesce(account_id, os.environ.get('ROBINHOOD_ACCOUNTID'))
token = helper.coalesce(token, os.environ.get('ROBINHOOD_TOKEN'))
# Set up the instrument cache
self.instrument_cache = {}
# Activate the client
self.api = simpleapi.TokenAPI(
simpleapi.MemoryCacheAPI(
simpleapi.API('https://api.robinhood.com/')
),
token=token
)
# Update headers for Robinhood
self.api.session.headers.update({
"Accept": "application/json",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "en;q=1, fr;q=0.9, de;q=0.8, ja;q=0.7, nl;q=0.6, it;q=0.5",
"Content-Type": "application/x-www-form-urlencoded; charset=utf-8",
"X-Robinhood-API-Version": "1.0.0",
"Connection": "keep-alive",
"User-Agent": "Robinhood/823 (iPhone; iOS 7.1.2; Scale/2.00)"
})
# Perform login
if (not self.is_logged_in()) and username and password:
self.login(username, password)
# Add account id
self.account_id = account_id
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.is_logged_in():
self.logout()
##
# Perform login to Robinhood, and save the returned token
# @return Nothing
def login(self, username, password):
logging.info('Logging in as %s', username)
# Save the username for reference
self.username = username
# Sign in
data = {
'username': self.username,
'password': password,
'expires_in': 86400,
'grant_type': 'password',
'scope': 'internal',
'client_id': 'c82SH0WZOsabOXGP2sxqcj34FxkvfnWRZBKlBjFS'
}
response = self.api.post('/oauth2/token/', data=data)
pass
def logout(self):
self.username, self.api.token = None, None
pass
def is_logged_in(self):
return bool(self.api.token)
##
# Get accounts associated with this user.
# @returns An Accounts collection.
@property
def accounts(self):
return Accounts(self.api)
##
# Get an identified account, defaulting to the first account if no ID given.
# @returns An Account resource.
def account(self, account_id=None):
# If given an account ID, or if defined on this Client, return an account resource
if account_id or self.account_id:
account = self.accounts.account(account_id or self.account_id)
# Otherwise, look up the default account, save the ID, and return the resource
else:
account = self.accounts.default
self.account_id = account.id
return account
@property
def equity(self) -> float:
"""Get the current total equity, which is cash + held assets
:return: a float representing the value of cash and held assets
"""
return float(self.portfolio()['equity'])
##
# Get the current total margin, which is the Robinhood Gold limit
# @return Float representing total margin
@property
def margin(self):
return float(self.account()['margin_balances']['margin_limit'])
##
# Get quotes
# @return A pandas dataframe of symbols and prices
def historical_prices(self, *symbols_or_ids):
# If no symbols passed, abort
if not symbols_or_ids:
return pd.DataFrame()
# Query API
symbol_list = ','.join([*symbols_or_ids])
response = self.api.get('/quotes/historicals/', params={'symbols': symbol_list, 'interval': 'day'}).json()
# Process response
quotes = []
for entry in response.get('results', []):
symbol = entry['symbol']
prices = list(map(lambda e: float(e['close_price']), entry['historicals']))
dates = list(map(lambda e: dateutil.parser.parse(e['begins_at']).date(), entry['historicals']))
s = pd.Series(prices, index=dates, name=symbol)
quotes.append(s)
return pd.concat(quotes, axis=1)
@property
def watchlists(self):
return Watchlists(self.api)
##
# Get watchlist
# @return An array of symbols included in this watchlist
def watchlist(self, name="Default"):
return Watchlist(self.watchlists, name + '/')
##
# Get the instrument details
def instrument(self, symbol_or_id):
# Extract an ID from a string, if available, and use as the search term
match = helper.id_for(symbol_or_id)
if match:
symbol_or_id = match
# TODO: Turn this into a real cache, but for now...
if not self.instrument_cache.get(symbol_or_id):
logging.info('Finding instrument %s', symbol_or_id)
if helper.id_for(symbol_or_id):
instrument = self.api.get(('/instruments/{}/', symbol_or_id)).json()
else:
instrument = self.api.get('/instruments/', params={'symbol': symbol_or_id}).json()['results'][0]
self.instrument_cache[instrument['symbol']] = instrument
self.instrument_cache[instrument['id']] = instrument
return self.instrument_cache[symbol_or_id]
##
# Get current account portfolio
# @return A response object of the portfolio
def portfolio(self):
return self.account().portfolio
##
# Get all positions; note that this includes closed positions!
# @return A list of positions as hashes
def positions(self):
return self.account().positions
##
# Get all open positions; removes any closed positions from list
# @return A list of open positions
def open_positions(self):
positions = self.positions()
positions = [position for position in positions if position.quantity > 0.0]
for position in positions:
position['symbol'] = self.instrument(position['instrument'])['symbol']
return pd.Series({p['symbol']: float(p['quantity']) for p in positions})
def quotes(self, *symbols_or_ids):
symbol_list = ','.join([*symbols_or_ids])
response = self.api.get('/quotes/', params={'symbols': symbol_list}).json()
quotes = [(float(quote['bid_price']) + float(quote['ask_price'])) / 2.0 for quote in response['results']]
index = [quote['symbol'] for quote in response['results']]
return pd.Series(quotes, index=index)
##
# Issue a buy order
# @return Whatever!
def buy(self, symbol, quantity, price):
response = self._buy(symbol, quantity, price)
# If not successful...
if response.status_code != requests.codes.ok:
# Check if buying too many shares
search = re.search(r'[Yy]ou can only purchase (\d+) shares', response.text)
if search:
raise BuyingTooManySharesError(symbol, search.group(1), response.text)
# Otherwise, raise general error message
else:
raise OrderError(response.text)
return response
def _buy(self, symbol, quantity, price):
data = {
'account': self.account_uri(),
'instrument': self.instrument(symbol)['url'],
'symbol': symbol,
'type': 'limit',
'price': price,
'time_in_force': 'gfd',
'trigger': 'immediate',
'quantity': abs(quantity),
'side': 'buy'
}
return self.api.post('/orders/', data=data)
##
# Issue a sell order
# @return Whatever!
def sell(self, symbol, quantity):
data = {
'account': self.account_uri(),
'instrument': self.instrument(symbol)['url'],
'symbol': symbol,
'type': 'market',
'time_in_force': 'gfd',
'trigger': 'immediate',
'quantity': abs(quantity),
'side': 'sell'
}
return self.api.post('/orders/', data=data)
def account_uri(self):
# TODO: Fix this!
return 'https://api.robinhood.com/accounts/' + self.account_id + '/'
@property
def orders(self):
return Orders(self.api)
@property
def markets(self):
return Markets(self.api)
@property
def nyse_market(self):
return Market(self.markets, 'XNYS')
def are_markets_open(self, date=None):
return self.nyse_market.is_open(date)
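##
# Minimal usage sketch for Client (illustrative only; the symbols below are
# placeholders and the ROBINHOOD_* environment variables are assumed to be set):
#
#     with Client() as client:
#         prices = client.quotes('AAPL', 'MSFT')     # pd.Series of mid prices
#         held = client.open_positions()             # pd.Series of open quantities
#         if client.are_markets_open():
#             client.buy('AAPL', 1, prices['AAPL'])  # limit order; may raise OrderError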
##
# Generic Order
class Order(object):
def __init__(self, symbol, quantity, limit=None, stop=None):
self.symbol = symbol
self.quantity = quantity
self.limit = limit
self.stop = stop
##
# A buy order
class BuyOrder(Order):
pass
##
# A sell order
class SellOrder(Order):
pass
##
# The Order Manager class, for managing the buying and selling orders of an account.
class OrderManager(object):
def __init__(self, client):
self.client = client
self.orders = deque()
def add(self, order):
self.orders.append(order)
def buy(self, symbol, quantity, limit=None):
self.add(BuyOrder(symbol, quantity, limit=limit))
def sell(self, symbol, quantity, stop=None):
self.add(SellOrder(symbol, quantity, stop=stop))
def execute(self):
while self.orders:
order = self.orders.popleft()
self._execute_order(order)
def _execute_order(self, order):
logging.info(' %s %s: %s @ %s', self._humanize_order(order), order.symbol, abs(order.quantity),
(order.limit or order.stop or 'market'))
try:
if isinstance(order, BuyOrder):
response = self.client.buy(order.symbol, order.quantity, order.limit)
elif isinstance(order, SellOrder):
response = self.client.sell(order.symbol, order.quantity)
except BuyingTooManySharesError as error:
logging.warning(' May only buy %s shares of %s', error.quantity, error.symbol)
self.buy(order.symbol, error.quantity, order.limit)
except SellingTooManySharesError as error:
logging.warning(' May only sell %s shares of %s', error.quantity, error.symbol)
self.sell(order.symbol, error.quantity, order.stop)
except OrderError as error:
logging.error('Unexpected order error: %s', error.message)
else:
logging.info(' Ok! Order is %s', response.json()['state'])
def _humanize_order(self, order):
if isinstance(order, BuyOrder):
return "Buying"
elif isinstance(order, SellOrder):
return "Selling"
return str(order)
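##
# Usage sketch for OrderManager (illustrative only; symbols and quantities are
# placeholders):
#
#     manager = OrderManager(client)
#     manager.buy('MSFT', 2, limit=300.0)
#     manager.sell('TSLA', 1)
#     manager.execute()  # drains the queue, requeueing with the allowed quantity
#                        # when a Buying/SellingTooManySharesError is raised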
class Account(resourceful.Instance):
ID_FIELD = 'account_number'
@property
def positions(self):
return Positions(self)
@property
def portfolio(self):
return Portfolio(self, 'portfolio')
##
# Account collections
class Accounts(resourceful.Collection):
ENDPOINT = 'accounts/'
INSTANCE_CLASS = Account
def account(self, account_id):
return Account(self, account_id)
@property
def default(self):
return self[0]
class Position(resourceful.Instance):
ID_FIELD = 'id'
@property
def quantity(self):
return float(self['quantity'])
@property
def is_open(self):
return self.quantity > 0.0
class Positions(resourceful.Collection):
ENDPOINT = 'positions/'
INSTANCE_CLASS = Position
class Order(resourceful.Instance):
ID_FIELD = 'id'
class Orders(resourceful.Collection):
ENDPOINT = 'orders/'
INSTANCE_CLASS = Order
class Instrument(resourceful.Instance):
ID_FIELD = 'id'
@property
def symbol(self):
return self['symbol']
def __repr__(self):
return self._to_repr(id=self.id, symbol=self.symbol)
class Instruments(resourceful.Collection):
ENDPOINT = 'instruments/'
INSTANCE_CLASS = Instrument
class Market(resourceful.Instance):
ID_FIELD = 'mic'
def hours(self, date=None):
date = date or datetime.datetime.now()
year, month, day = date.year, date.month, date.day
uri = ('hours/{}-{}-{}/', year, month, day)
return resourceful.Response(self.get(uri))
def is_open(self, date=None):
return self.hours(date)['is_open']
class Markets(resourceful.Collection):
ENDPOINT = 'markets/'
INSTANCE_CLASS = Market
class Portfolio(resourceful.Instance):
ID_FIELD = 'url'
@property
def equity(self):
return float(self['equity'])
##
# Watchlist Instrument class, for use with a Watchlist.
class WatchlistInstrument(resourceful.Instance):
ID_FIELD = 'url'
@resourceful.Instance.endpoint.setter
def endpoint(self, value):
self._endpoint = helper.id_for(value)
@property
def id(self):
return helper.id_for(self[self.ID_FIELD])
@property
def instrument(self):
return Instrument(Instruments(self.api_or_parent, root=True), self.id)
class Watchlist(resourceful.Collection):
INSTANCE_CLASS = WatchlistInstrument
def instruments(self):
return [ii.instrument for ii in self.list()]
def symbols(self):
return [ii.symbol for ii in self.instruments()]
def add_all(self, *id_or_symbols):
return [self.add(id_or_symbol) for id_or_symbol in id_or_symbols]
def remove_all(self, *id_or_symbols):
return [self.remove(id_or_symbol) for id_or_symbol in id_or_symbols]
def add(self, id_or_symbol):
if hasattr(id_or_symbol, 'id'):
return self.add_instrument(id_or_symbol.id)
elif helper.id_for(id_or_symbol):
return self.add_instrument(helper.id_for(id_or_symbol))
else:
return self.add_symbols(id_or_symbol)
def remove(self, id_or_symbol):
if hasattr(id_or_symbol, 'id'):
return self.remove_instrument(id_or_symbol.id)
elif helper.id_for(id_or_symbol):
return self.remove_instrument(helper.id_for(id_or_symbol))
else:
return self.remove_symbol(id_or_symbol)
def add_symbols(self, *symbols):
symbol_list = ','.join([*symbols])
return self.post('bulk_add/', data={'symbols': symbol_list})
def remove_symbol(self, symbol):
# For every symbol, find its instrument ID and delete
instrument_id = helper.symbol_table.get(symbol)
if not instrument_id:
instrument_id = Instruments(self, root=True).find_by(symbol=symbol).id
return self.remove_instrument(instrument_id)
def add_instrument(self, instrument_id):
return self.post(instrument_id)
def remove_instrument(self, instrument_id):
return self.delete(instrument_id)
class Watchlists(resourceful.Collection):
ENDPOINT = 'watchlists/'
INSTANCE_CLASS = Watchlist
def create(self, name):
return self.post(None, data={'name': name})
|
mit
|
numenta/htmresearch
|
projects/l2_pooling/plot_capacity_result.py
|
7
|
5539
|
import os
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import pandas as pd
from capacity_test import _prepareResultsDir, plotResults
from capacity_test import DEFAULT_RESULT_DIR_NAME, DEFAULT_PLOT_DIR_NAME
plt.ion()
if __name__ == "__main__":
numCorticalColumns = 1
confusionThreshold = 30
l4ColumnCountList = [256, 256, 512]
numInputBitsList = [12, 5, 10]
resultDirName=DEFAULT_RESULT_DIR_NAME
plotDirName=DEFAULT_PLOT_DIR_NAME
DEFAULT_RESULT_DIR_NAME = "results"
DEFAULT_PLOT_DIR_NAME = "plots"
DEFAULT_COLORS = ("b", "r", "c", "g", 'm')
# Plot capacity vs L4 size
expParams = []
expParams.append(
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3})
expParams.append(
{'l4Column': 200, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3})
expParams.append(
{'l4Column': 250, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3})
# plot result
ploti = 0
fig, ax = plt.subplots(2, 2)
st = fig.suptitle(
"Varying number of objects ({} cortical column{})"
.format(numCorticalColumns, "s" if numCorticalColumns > 1 else ""
), fontsize="x-large"
)
for axi in (0, 1):
for axj in (0, 1):
ax[axi][axj].xaxis.set_major_locator(ticker.MultipleLocator(100))
legendEntries = []
for expParam in expParams:
expname = "multiple_column_capacity_varying_object_num_synapses_{}_thresh_{}_l4column_{}".format(
expParam['sample'], expParam['thresh'], expParam["l4Column"])
resultFileName = _prepareResultsDir("{}.csv".format(expname),
resultDirName=resultDirName
)
result = pd.read_csv(resultFileName)
plotResults(result, ax, "numObjects", None, DEFAULT_COLORS[ploti], confusionThreshold, 0)
ploti += 1
legendEntries.append("L4 mcs {} w {} s {} thresh {}".format(
expParam["l4Column"], expParam['w'], expParam['sample'],
expParam['thresh']))
ax[0, 0].legend(legendEntries, loc=4, fontsize=8)
fig.tight_layout()
# shift subplots down:
st.set_y(0.95)
fig.subplots_adjust(top=0.85)
plt.savefig(
os.path.join(
plotDirName,
"capacity_varying_object_num_l4size_summary.pdf"
)
)
# Plot capacity vs L2 size
expParams = []
expParams.append(
{'L2cellCount': 2048, 'L2activeBits': 40, 'w': 10, 'sample': 6, 'thresh': 3,
'l2Column': 1})
expParams.append(
{'L2cellCount': 4096, 'L2activeBits': 40, 'w': 10, 'sample': 6, 'thresh': 3,
'l2Column': 1})
expParams.append(
{'L2cellCount': 6144, 'L2activeBits': 40, 'w': 10, 'sample': 6, 'thresh': 3,
'l2Column': 1})
# plot result
ploti = 0
fig, ax = plt.subplots(2, 2)
st = fig.suptitle("Varying number of objects", fontsize="x-large")
for axi in (0, 1):
for axj in (0, 1):
ax[axi][axj].xaxis.set_major_locator(ticker.MultipleLocator(100))
legendEntries = []
for expParam in expParams:
expName = "multiple_column_capacity_varying_object_num_synapses_{}_thresh_{}_l2Cells_{}_l2column_{}".format(
expParam['sample'], expParam['thresh'], expParam["L2cellCount"],
expParam['l2Column'])
resultFileName = _prepareResultsDir("{}.csv".format(expName),
resultDirName=resultDirName
)
result = pd.read_csv(resultFileName)
plotResults(result, ax, "numObjects", None, DEFAULT_COLORS[ploti], confusionThreshold, 0)
ploti += 1
legendEntries.append("L2 cells {}/{} #cc {} ".format(
expParam['L2activeBits'], expParam['L2cellCount'], expParam['l2Column']))
ax[0, 0].legend(legendEntries, loc=3, fontsize=8)
fig.tight_layout()
# shift subplots down:
st.set_y(0.95)
fig.subplots_adjust(top=0.85)
plt.savefig(
os.path.join(
plotDirName,
"capacity_vs_L2size.pdf"
)
)
# Plot capacity vs number of cortical columns
expParams = []
expParams.append(
{'l4Column': 150, 'externalInputSize': 2400, 'w': 10, 'sample': 6,
'thresh': 3, 'l2Column': 1})
expParams.append(
{'l4Column': 150, 'externalInputSize': 2400, 'w': 10, 'sample': 6,
'thresh': 3, 'l2Column': 2})
expParams.append(
{'l4Column': 150, 'externalInputSize': 2400, 'w': 10, 'sample': 6,
'thresh': 3, 'l2Column': 3})
# plot result
ploti = 0
fig, ax = plt.subplots(2, 2)
st = fig.suptitle("Varying number of columns", fontsize="x-large")
for axi in (0, 1):
for axj in (0, 1):
ax[axi][axj].xaxis.set_major_locator(ticker.MultipleLocator(100))
legendEntries = []
for expParam in expParams:
expName = "multiple_column_capacity_varying_object_num_synapses_{}_thresh_{}_l4column_{}_l2column_{}".format(
expParam['sample'], expParam['thresh'], expParam["l4Column"],
expParam['l2Column'])
resultFileName = _prepareResultsDir("{}.csv".format(expName),
resultDirName=resultDirName
)
result = pd.read_csv(resultFileName)
plotResults(result, ax, "numObjects", None, DEFAULT_COLORS[ploti], confusionThreshold, 0)
ploti += 1
legendEntries.append("L4 mcs {} #cc {} ".format(
expParam['l4Column'], expParam['l2Column']))
ax[0, 0].legend(legendEntries, loc=3, fontsize=8)
# shift subplots down:
st.set_y(0.95)
fig.subplots_adjust(top=0.85)
plt.savefig(
os.path.join(
plotDirName,
"capacity_vs_num_columns.pdf"
)
)
|
agpl-3.0
|
pxzhang94/GAN
|
GAN/vanilla_gan/gan_tensorflow.py
|
1
|
3411
|
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
def xavier_init(size):
in_dim = size[0]
xavier_stddev = 1. / tf.sqrt(in_dim / 2.)
return tf.random_normal(shape=size, stddev=xavier_stddev)
X = tf.placeholder(tf.float32, shape=[None, 784])
D_W1 = tf.Variable(xavier_init([784, 128]))
D_b1 = tf.Variable(tf.zeros(shape=[128]))
D_W2 = tf.Variable(xavier_init([128, 1]))
D_b2 = tf.Variable(tf.zeros(shape=[1]))
theta_D = [D_W1, D_W2, D_b1, D_b2]
Z = tf.placeholder(tf.float32, shape=[None, 100])
G_W1 = tf.Variable(xavier_init([100, 128]))
G_b1 = tf.Variable(tf.zeros(shape=[128]))
G_W2 = tf.Variable(xavier_init([128, 784]))
G_b2 = tf.Variable(tf.zeros(shape=[784]))
theta_G = [G_W1, G_W2, G_b1, G_b2]
def sample_Z(m, n):
return np.random.uniform(-1., 1., size=[m, n])
def generator(z):
G_h1 = tf.nn.relu(tf.matmul(z, G_W1) + G_b1)
G_log_prob = tf.matmul(G_h1, G_W2) + G_b2
G_prob = tf.nn.sigmoid(G_log_prob)
return G_prob
def discriminator(x):
D_h1 = tf.nn.relu(tf.matmul(x, D_W1) + D_b1)
D_logit = tf.matmul(D_h1, D_W2) + D_b2
D_prob = tf.nn.sigmoid(D_logit)
return D_prob, D_logit
def plot(samples):
fig = plt.figure(figsize=(4, 4))
gs = gridspec.GridSpec(4, 4)
gs.update(wspace=0.05, hspace=0.05)
for i, sample in enumerate(samples):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.imshow(sample.reshape(28, 28), cmap='Greys_r')
return fig
G_sample = generator(Z)
D_real, D_logit_real = discriminator(X)
D_fake, D_logit_fake = discriminator(G_sample)
# D_loss = -tf.reduce_mean(tf.log(D_real) + tf.log(1. - D_fake))
# G_loss = -tf.reduce_mean(tf.log(D_fake))
# Alternative losses:
# -------------------
D_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_real, labels=tf.ones_like(D_logit_real)))
D_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.zeros_like(D_logit_fake)))
D_loss = D_loss_real + D_loss_fake
G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.ones_like(D_logit_fake)))
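# (Clarifying note: the formulation above is the non-saturating GAN objective --
# the generator maximizes log D(G(z)) via a cross-entropy against "real" labels
# rather than minimizing log(1 - D(G(z))), which, per Goodfellow et al. (2014),
# avoids vanishing generator gradients early in training when the discriminator
# easily rejects generated samples.)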
D_solver = tf.train.AdamOptimizer().minimize(D_loss, var_list=theta_D)
G_solver = tf.train.AdamOptimizer().minimize(G_loss, var_list=theta_G)
mb_size = 128
Z_dim = 100
mnist = input_data.read_data_sets('../../MNIST_data', one_hot=True)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
if not os.path.exists('out/'):
os.makedirs('out/')
i = 0
for it in range(1000000):
if it % 1000 == 0:
samples = sess.run(G_sample, feed_dict={Z: sample_Z(16, Z_dim)})
fig = plot(samples)
plt.savefig('out/{}.png'.format(str(i).zfill(3)), bbox_inches='tight')
i += 1
plt.close(fig)
X_mb, _ = mnist.train.next_batch(mb_size)
_, D_loss_curr = sess.run([D_solver, D_loss], feed_dict={X: X_mb, Z: sample_Z(mb_size, Z_dim)})
_, G_loss_curr = sess.run([G_solver, G_loss], feed_dict={Z: sample_Z(mb_size, Z_dim)})
if it % 1000 == 0:
print('Iter: {}'.format(it))
print('D loss: {:.4}'. format(D_loss_curr))
print('G_loss: {:.4}'.format(G_loss_curr))
print()
|
apache-2.0
|
mirnylab/cooler
|
tests/test_core.py
|
1
|
9935
|
from __future__ import division, print_function
from io import BytesIO
from scipy import sparse
import numpy as np
import pandas as pd
import h5py
import pytest
from cooler import core
def make_hdf5_table(mode):
s = BytesIO()
f = h5py.File(s, mode)
h5opts = dict(compression='gzip', compression_opts=6, maxshape=(None,))
grp = f.create_group('table')
grp.create_dataset(
'chrom',
data=np.array(['chr1', 'chr1', 'chr1', 'chr2', 'chr2'], dtype='S'),
**h5opts
)
grp.create_dataset(
'start',
data=[0, 10, 20, 0, 10],
**h5opts
)
grp.create_dataset(
'end',
data=[10, 20, 32, 10, 21],
**h5opts
)
grp.create_dataset(
'value',
data=[1.1, 2.0, 3.0, 4.0, 5.0],
**h5opts
)
f.flush()
return f
def test_get():
f = make_hdf5_table('a')
out = core.get(f['table'], 0, 3, ['chrom', 'value'])
assert isinstance(out, pd.DataFrame)
assert len(out.columns) == 2
assert out['chrom'].astype('U').tolist() == ['chr1', 'chr1', 'chr1']
assert np.allclose(out['value'].values, [1.1, 2.0, 3.0])
out = core.get(f['table'], 0, 3, 'value')
assert isinstance(out, pd.Series)
assert np.allclose(out.values, [1.1, 2.0, 3.0])
out = core.get(f['table'], 0, 3, 'value', as_dict=True)
assert isinstance(out, dict)
assert np.allclose(out['value'], [1.1, 2.0, 3.0])
out = core.get(f['table'])
assert len(out) == 5
assert len(out.columns) == 4
out = core.get(f['table'], lo=None)
assert len(out) == 5
assert len(out.columns) == 4
out = core.get(f['table'], lo=3)
assert len(out) == 2
assert len(out.columns) == 4
def test_put():
f = make_hdf5_table('a')
# append
df = pd.DataFrame({
'chrom': ['chr3', 'chr3'],
'start': [0, 20],
'end': [20, 40],
'value': [4.0, 5.0],
})
core.put(f['table'], df, lo=5)
f.flush()
out = core.get(f['table'])
assert len(out) == 7
# insert a categorical column
s = pd.Series(pd.Categorical(out['chrom'], ordered=True), index=out.index)
s.name = 'chrom_enum'
core.put(f['table'], s)
assert h5py.check_dtype(enum=f['table/chrom_enum'].dtype)
out = core.get(f['table'])
assert len(out.columns) == 5
assert pd.api.types.is_categorical_dtype(out['chrom_enum'].dtype)
out = core.get(f['table'], convert_enum=False)
assert len(out.columns) == 5
assert pd.api.types.is_integer_dtype(out['chrom_enum'].dtype)
# don't convert categorical to enum
s.name = 'chrom_string'
core.put(f['table'], s, store_categories=False)
out = core.get(f['table'])
assert len(out.columns) == 6
assert not pd.api.types.is_categorical_dtype(out['chrom_string'].dtype)
# scalar input
core.put(f['table'], {'foo': 42})
out = core.get(f['table'])
assert len(out.columns) == 7
assert (out['foo'] == 42).all()
def test_delete():
f = make_hdf5_table('a')
core.delete(f['table'])
assert len(f['table'].keys()) == 0
f = make_hdf5_table('a')
core.delete(f['table'], ['chrom'])
assert len(f['table'].keys()) == 3
f = make_hdf5_table('a')
core.delete(f['table'], 'chrom')
assert len(f['table'].keys()) == 3
def test_region_to_offset_extent(mock_cooler):
chromID_lookup = pd.Series({"chr1": 0, "chr2": 1})
binsize = 100
region = ("chr1", 159, 402)
first, last = 1, 4
assert core.region_to_extent(
mock_cooler, chromID_lookup, region, binsize
) == (first, last + 1)
assert core.region_to_extent(mock_cooler, chromID_lookup, region, None) == (
first,
last + 1,
)
assert core.region_to_offset(
mock_cooler, chromID_lookup, region, binsize
) == first
assert core.region_to_offset(
mock_cooler, chromID_lookup, region, None
) == first
region = ("chr1", 159, 400)
first, last = 1, 3
assert core.region_to_extent(
mock_cooler, chromID_lookup, region, binsize
) == (first, last + 1)
assert core.region_to_extent(mock_cooler, chromID_lookup, region, None) == (
first,
last + 1,
)
assert core.region_to_offset(
mock_cooler, chromID_lookup, region, binsize
) == first
assert core.region_to_offset(
mock_cooler, chromID_lookup, region, None
) == first
def test_interval_ops():
assert core._comes_before(1, 5, 6, 10)
assert not core._comes_before(6, 10, 1, 5)
assert core._comes_before(1, 5, 6, 10, strict=True)
assert core._comes_before(1, 5, 5, 10, strict=True)
assert core._comes_before(1, 5, 3, 10)
assert not core._comes_before(1, 5, 3, 10, strict=True)
assert core._contains(1, 10, 3, 5)
assert core._contains(1, 10, 3, 5, strict=True)
assert core._contains(1, 10, 3, 10)
assert not core._contains(1, 10, 3, 10, strict=True)
assert not core._contains(1, 5, 6, 10)
def test_indexing_mixin():
class Impl(core._IndexingMixin):
def __init__(self, shape):
self._shape = shape
def __getitem__(self, key):
s1, s2 = self._unpack_index(key)
i0, i1 = self._process_slice(s1, self._shape[0])
j0, j1 = self._process_slice(s2, self._shape[1])
return i0, i1, j0, j1
obj = Impl((10, 10))
# row scalar
assert obj[5] == (5, 6, 0, 10)
assert obj[5, ] == (5, 6, 0, 10)
# row slice
assert obj[:] == (0, 10, 0, 10)
assert obj[1:5] == (1, 5, 0, 10)
assert obj[:-2] == (0, 8, 0, 10)
assert obj[-2:] == (8, 10, 0, 10)
# slice + scalar
assert obj[1:5, 3] == (1, 5, 3, 4)
assert obj[2, 1:5] == (2, 3, 1, 5)
assert obj[2, 0:-2] == (2, 3, 0, 8)
assert obj[-2, 0:-2] == (8, 9, 0, 8)
# row + col scalar query
assert obj[5, 5] == (5, 6, 5, 6)
# row + col slices
assert obj[:, :] == (0, 10, 0, 10)
assert obj[1:5, :] == (1, 5, 0, 10)
assert obj[:, 2:3] == (0, 10, 2, 3)
assert obj[1:5, 2:3] == (1, 5, 2, 3)
with pytest.raises(IndexError):
obj[10]
with pytest.raises(TypeError):
obj[{}]
# with pytest.raises(TypeError):
# obj[4.5]
def test_selector1d():
slicer = lambda fields, lo, hi: (lo, hi) # noqa
fetcher = lambda x: x # noqa
nmax = 50
s = core.RangeSelector1D(None, slicer, fetcher, nmax)
assert s[30] == (30, 31)
assert s[10:20] == (10, 20)
assert s[:20] == (0, 20)
assert s[10:] == (10, nmax)
assert s[:] == (0, nmax)
assert s[:nmax] == (0, nmax)
assert s[:-10] == (0, nmax - 10)
assert s[1:1] == (1, 1)
with pytest.raises(IndexError):
s[:, :]
with pytest.raises(ValueError):
s[::2]
# assert_raises(TypeError, lambda : s['blah'])
assert s.shape == (nmax,)
# FIXME - questionable behavior
assert s[30:20] == (30, 20) # lo > hi
assert s[nmax + 10 : nmax + 30] == (nmax + 10, nmax + 30) # lo > nmax
assert s[10.0] == (10, 11) # accepting floats
# assert s[10.1] == (10.1, 11.1) # not casting
# assert s[nmax+10] == (nmax+10, nmax+11)
slicer = lambda fields, lo, hi: pd.DataFrame( # noqa
np.zeros((hi - lo, len(fields))),
columns=fields
)
fetcher = lambda x: list(map(int, x.split(':'))) # noqa
nmax = 50
sel = core.RangeSelector1D(['a', 'b', 'c'], slicer, fetcher, nmax)
assert sel.columns.tolist() == ['a', 'b', 'c']
assert list(sel.keys()) == ['a', 'b', 'c']
assert isinstance(sel.dtypes, pd.Series)
assert 'a' in sel
assert len(sel) == 50
assert len(sel[['a', 'b']].columns) == 2
assert len(sel[['a']].columns) == 1
assert np.all(sel[5] == 0)
assert np.all(sel[5, ] == 0)
assert len(sel.fetch('5:10')) == 5
# some things are broken here
series_view = sel['a']
assert len(series_view) == 50
assert series_view.shape == (50,)
# series_view.columns ???
def test_selector2d():
slicer = lambda field, i0, i1, j0, j1: (i0, i1, j0, j1) # noqa
fetcher = lambda x: x # noqa
nmax = 50
s = core.RangeSelector2D(None, slicer, fetcher, (nmax, nmax))
assert s[30] == (30, 31, 0, nmax)
assert s[10:20, 10:20] == (10, 20, 10, 20)
assert s[:] == (0, nmax, 0, nmax)
with pytest.raises(IndexError):
s[:, :, :]
with pytest.raises(ValueError):
s[::2, :]
assert s.shape == (nmax, nmax)
slicer = lambda field, i0, i1, j0, j1: ( # noqa
np.zeros((i1 - i0, j1 - j0))
)
fetcher = lambda x, y=None: (0, 10, 0, 10) # noqa
nmax = 50
sel = core.RangeSelector2D('count', slicer, fetcher, (nmax, nmax))
assert sel.shape == (50, 50)
assert len(sel) == 50
assert sel[:10, 5:10].shape == (10, 5)
assert sel.fetch('0:10', '0:10').shape == (10, 10)
def test_slice_matrix(mock_cooler):
slices = [
(0, 10, 0, 10),
(0, 10, 10, 20),
(5, 15, 10, 20),
(10, 20, 5, 15),
(1, 1, 5, 15),
(1, 1, 1, 1),
]
for i0, i1, j0, j1 in slices:
triu_reader = core.CSRReader(mock_cooler, "count", max_chunk=10)
# triangular query
index = triu_reader.index_col(i0, i1, j0, j1)
i, j, v = triu_reader.query(i0, i1, j0, j1)
assert len(index) == len(i)
# rectangular query
i, j, v = core.query_rect(triu_reader.query, i0, i1, j0, j1)
mat = sparse.coo_matrix((v, (i - i0, j - j0)), (i1 - i0, j1 - j0)).toarray()
r = sparse.coo_matrix(
(
(
mock_cooler["pixels/count"],
(mock_cooler["pixels/bin1_id"], mock_cooler["pixels/bin2_id"]),
)
),
(mock_cooler.attrs["nbins"],) * 2,
)
r_full = r.toarray() + r.toarray().T
assert np.allclose(r_full[i0:i1, j0:j1], mat)
def test_csr_reader():
pass
def test_query_rect():
pass
|
bsd-3-clause
|
fredhusser/scikit-learn
|
sklearn/datasets/tests/test_lfw.py
|
230
|
7880
|
"""This test for the LFW require medium-size data dowloading and processing
If the data has not been already downloaded by running the examples,
the tests won't run (skipped).
If the test are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_people,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion and with no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
|
bsd-3-clause
|
haphaeu/yoshimi
|
neural_networks/mnist/mnist.py
|
1
|
4523
|
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 26 16:15:51 2017
Reads data from MNIST and converts it for use with the neural network implementation in
network.py
[1] http://yann.lecun.com/exdb/mnist/
TRAINING SET LABEL FILE (train-labels-idx1-ubyte):
[offset] [type] [value] [description]
0000 32 bit integer 0x00000801(2049) magic number (MSB first)
0004 32 bit integer 60000 number of items
0008 unsigned byte ?? label
0009 unsigned byte ?? label
........
xxxx unsigned byte ?? label
The label values are 0 to 9.
TRAINING SET IMAGE FILE (train-images-idx3-ubyte):
[offset] [type] [value] [description]
0000 32 bit integer 0x00000803(2051) magic number
0004 32 bit integer 60000 number of images
0008 32 bit integer 28 number of rows
0012 32 bit integer 28 number of columns
0016 unsigned byte ?? pixel
0017 unsigned byte ?? pixel
........
xxxx unsigned byte ?? pixel
Pixels are organized row-wise. Pixel values are 0 to 255. 0 means background (white), 255 means
foreground (black).
@author: raf
"""
import gzip
import struct
import numpy as np
from matplotlib import pyplot as plt
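# Hedged illustration (not part of the original module): the IDX header layout
# described in the module docstring maps directly onto struct.unpack with
# big-endian ("MSB first") 32-bit integers. The helper name read_idx_header is
# an assumption for illustration only; load() below does the real parsing.
def read_idx_header(path):
    """Return the magic number and dimension sizes of a gzipped IDX file."""
    with gzip.open(path, 'rb') as pf:
        magic = struct.unpack(">I", pf.read(4))[0]
        n_dims = magic & 0xff  # the low byte of the magic number encodes the number of dimensions
        dims = struct.unpack(">" + "I" * n_dims, pf.read(4 * n_dims))
    return magic, dims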
def fetch():
import urllib.request
from os import path
print("Fetching data from MNIST.")
urls = ["http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz",
"http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz",
"http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz",
"http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz"]
for url in urls:
fname = path.basename(url)
if path.exists(fname):
print(fname, "already exists")
else:
print("Downloading", url)
urllib.request.urlretrieve(url, fname)
def load():
"""Loads training and test data"""
# reads labels
with gzip.open('train-labels-idx1-ubyte.gz', 'rb') as pf:
magic, num = struct.unpack(">II", pf.read(2*4))
labels = struct.unpack("B" * num, pf.read(num))
# reads the images.
with gzip.open('train-images-idx3-ubyte.gz', 'rb') as pf:
magic, num, rows, cols = struct.unpack(">IIII", pf.read(4*4))
images = [struct.unpack("B" * rows*cols, pf.read(rows*cols)) for _ in range(num)]
# re-shapes it to a column vector and normalises it
images = [np.reshape(x, (rows*cols, 1))/255.0 for x in images]
# vectorise the labels, eg, 5 will be [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]
labels = [vectorise_label(j) for j in labels]
train_data = list(zip(images, labels))
with gzip.open('t10k-labels-idx1-ubyte.gz', 'rb') as pf:
magic, num = struct.unpack(">II", pf.read(2*4))
labels = struct.unpack("B" * num, pf.read(num))
with gzip.open('t10k-images-idx3-ubyte.gz', 'rb') as pf:
magic, num, rows, cols = struct.unpack(">IIII", pf.read(4*4))
images = [struct.unpack("B" * rows*cols, pf.read(rows*cols)) for _ in range(num)]
# re-shapes it to a column vector and normalises it
images = [np.reshape(x, (rows*cols, 1))/255.0 for x in images]
# note that for the test data, we don't need the vector form of the labels...
test_data = list(zip(images, labels))
return (train_data, test_data)
def vectorise_label(j):
"""# vectorise the labels, eg, 5 --> [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]"""
e = np.zeros((10, 1))
e[j] = 1.0
return e
def interpret_vector(v):
"""inverse function of vectorise_label, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0] --> 5"""
return v.argmax()
def show(images, sz=(28, 28), w=8):
"""plots one of more images and their correct labels"""
from matplotlib.cm import gray
num = len(images)
fig = plt.figure()
for i, img in enumerate(images):
raw, lbl = img
ax = fig.add_subplot((num-1)//w+1, min(w, num), i+1)
imgplot = ax.imshow(1.0-raw.reshape(sz), cmap=gray)
imgplot.set_interpolation('nearest')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_axis_off()
ax.text(28, 28, lbl if isinstance(lbl, int) else interpret_vector(lbl))
plt.show()
def test_mnist(num=64):
"""simple test of loaded data and plot images"""
train, test = load()
np.random.shuffle(train)
np.random.shuffle(test)
show(train[:num//2]+test[:num//2])
|
lgpl-3.0
|
dmsul/econtools
|
econtools/metrics/tests/test_colinear.py
|
1
|
2139
|
from os import path
import pandas as pd
import numpy as np
import pytest
from econtools.metrics.api import reg
from econtools.metrics.core import _get_colinear_cols
class TestCheckColinear(object):
def setup(self):
"""Stata reg output from `sysuse auto; reg price mpg`"""
test_path = path.split(path.relpath(__file__))[0]
auto_path = path.join(test_path, 'data', 'auto.dta')
autodata = pd.read_stata(auto_path)
self.df = autodata
def test_one_colin_column(self):
df = self.df.copy()
df['neg_mpg'] = -1 * df['mpg']
result = _get_colinear_cols(df[['price', 'mpg', 'neg_mpg']])
expected = ['neg_mpg']
assert result == expected
def test_two_colin_column(self):
df = self.df.copy()
df['neg_mpg'] = -1 * df['mpg']
df['mpg2'] = 2 * df['mpg']
result = _get_colinear_cols(df[['price', 'mpg', 'neg_mpg', 'mpg2']])
expected = ['neg_mpg', 'mpg2']
assert result == expected
def test_two_colin_column_reorder(self):
df = self.df.copy()
df['neg_mpg'] = -1 * df['mpg']
df['mpg2'] = 2 * df['mpg']
result = _get_colinear_cols(df[['mpg2', 'mpg', 'price', 'neg_mpg']])
expected = ['mpg', 'neg_mpg']
assert result == expected
class TestColinearReg(object):
def test_colinear_reg(self):
"""Stata reg output from `sysuse auto; reg price mpg`"""
test_path = path.split(path.relpath(__file__))[0]
auto_path = path.join(test_path, 'data', 'auto.dta')
autodata = pd.read_stata(auto_path)
autodata['mpg2'] = autodata['mpg'] * 2
y = 'price'
x = ['mpg', 'length', 'mpg2']
# Check without check (Singular matrix error)
with pytest.raises(np.linalg.linalg.LinAlgError):
reg(autodata, y, x, addcons=True)
# Check with check (my ValueError, with message)
with pytest.raises(ValueError) as e:
reg(autodata, y, x, addcons=True, check_colinear=True)
assert 'Colinear variables: \nmpg2' == str(e.value)
if __name__ == '__main__':
pytest.main()
|
bsd-3-clause
|
xuewei4d/scikit-learn
|
sklearn/preprocessing/tests/test_label.py
|
8
|
22347
|
import numpy as np
import pytest
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_warns_message
from sklearn.utils._testing import ignore_warnings
from sklearn.utils import _to_object_array
from sklearn.preprocessing._label import LabelBinarizer
from sklearn.preprocessing._label import MultiLabelBinarizer
from sklearn.preprocessing._label import LabelEncoder
from sklearn.preprocessing._label import label_binarize
from sklearn.preprocessing._label import _inverse_binarize_thresholding
from sklearn.preprocessing._label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_label_binarizer():
# one-class case defaults to negative label
# For dense case:
inp = ["pos", "pos", "pos", "pos"]
lb = LabelBinarizer(sparse_output=False)
expected = np.array([[0, 0, 0, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# For sparse case:
lb = LabelBinarizer(sparse_output=True)
got = lb.fit_transform(inp)
assert issparse(got)
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got.toarray())
assert_array_equal(lb.inverse_transform(got.toarray()), inp)
lb = LabelBinarizer(sparse_output=False)
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["neg", "pos"])
assert_array_equal(expected, got)
to_invert = np.array([[1, 0],
[0, 1],
[0, 1],
[1, 0]])
assert_array_equal(lb.inverse_transform(to_invert), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
lb = LabelBinarizer()
expected = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
got = lb.fit_transform(['b', 'd', 'e'])
assert_array_equal(expected, got)
expected = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0]])
got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
assert_array_equal(expected, got)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=0)
# two-class case with pos_label=0
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 0, 0, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
# Check that invalid arguments yield ValueError
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
with pytest.raises(ValueError):
lb.transform(multi_label)
lb = LabelBinarizer()
with pytest.raises(ValueError):
lb.transform([])
with pytest.raises(ValueError):
lb.inverse_transform([])
with pytest.raises(ValueError):
LabelBinarizer(neg_label=2, pos_label=1)
with pytest.raises(ValueError):
LabelBinarizer(neg_label=2, pos_label=2)
with pytest.raises(ValueError):
LabelBinarizer(neg_label=1, pos_label=2, sparse_output=True)
# Fail on y_type
with pytest.raises(ValueError):
_inverse_binarize_thresholding(y=csr_matrix([[1, 2], [2, 1]]),
output_type="foo", classes=[1, 2],
threshold=0)
# Sequence of seq type should raise ValueError
y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
with pytest.raises(ValueError):
LabelBinarizer().fit_transform(y_seq_of_seqs)
# Fail on the number of classes
with pytest.raises(ValueError):
_inverse_binarize_thresholding(y=csr_matrix([[1, 2], [2, 1]]),
output_type="foo",
classes=[1, 2, 3],
threshold=0)
# Fail on the dimension of 'binary'
with pytest.raises(ValueError):
_inverse_binarize_thresholding(y=np.array([[1, 2, 3], [2, 1, 3]]),
output_type="binary",
classes=[1, 2, 3],
threshold=0)
# Fail on multioutput data
with pytest.raises(ValueError):
LabelBinarizer().fit(np.array([[1, 3], [2, 1]]))
with pytest.raises(ValueError):
label_binarize(np.array([[1, 3], [2, 1]]), classes=[1, 2, 3])
@pytest.mark.parametrize(
"values, classes, unknown",
[(np.array([2, 1, 3, 1, 3], dtype='int64'),
np.array([1, 2, 3], dtype='int64'), np.array([4], dtype='int64')),
(np.array(['b', 'a', 'c', 'a', 'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
np.array(['d'], dtype=object)),
(np.array(['b', 'a', 'c', 'a', 'c']),
np.array(['a', 'b', 'c']), np.array(['d']))],
ids=['int64', 'object', 'str'])
def test_label_encoder(values, classes, unknown):
# Test LabelEncoder's transform, fit_transform and
# inverse_transform methods
le = LabelEncoder()
le.fit(values)
assert_array_equal(le.classes_, classes)
assert_array_equal(le.transform(values), [1, 0, 2, 0, 2])
assert_array_equal(le.inverse_transform([1, 0, 2, 0, 2]), values)
le = LabelEncoder()
ret = le.fit_transform(values)
assert_array_equal(ret, [1, 0, 2, 0, 2])
with pytest.raises(ValueError, match="unseen labels"):
le.transform(unknown)
def test_label_encoder_negative_ints():
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
with pytest.raises(ValueError):
le.transform([0, 6])
@pytest.mark.parametrize("dtype", ['str', 'object'])
def test_label_encoder_str_bad_shape(dtype):
le = LabelEncoder()
le.fit(np.array(["apple", "orange"], dtype=dtype))
msg = "should be a 1d array"
with pytest.raises(ValueError, match=msg):
le.transform("apple")
def test_label_encoder_errors():
# Check that invalid arguments yield ValueError
le = LabelEncoder()
with pytest.raises(ValueError):
le.transform([])
with pytest.raises(ValueError):
le.inverse_transform([])
# Fail on unseen labels
le = LabelEncoder()
le.fit([1, 2, 3, -1, 1])
msg = "contains previously unseen labels"
with pytest.raises(ValueError, match=msg):
le.inverse_transform([-2])
with pytest.raises(ValueError, match=msg):
le.inverse_transform([-2, -3, -4])
# Fail on inverse_transform("")
msg = r"should be a 1d array.+shape \(\)"
with pytest.raises(ValueError, match=msg):
le.inverse_transform("")
@pytest.mark.parametrize(
"values",
[np.array([2, 1, 3, 1, 3], dtype='int64'),
np.array(['b', 'a', 'c', 'a', 'c'], dtype=object),
np.array(['b', 'a', 'c', 'a', 'c'])],
ids=['int64', 'object', 'str'])
def test_label_encoder_empty_array(values):
le = LabelEncoder()
le.fit(values)
# test empty transform
transformed = le.transform([])
assert_array_equal(np.array([]), transformed)
# test empty inverse transform
inverse_transformed = le.inverse_transform([])
assert_array_equal(np.array([]), inverse_transformed)
def test_sparse_output_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: ({2, 3}, {1}, {1, 2}),
lambda: iter([iter((2, 3)), iter((1,)), {1, 2}]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for sparse_output in [True, False]:
for inp in inputs:
# With fit_transform
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit_transform(inp())
assert issparse(got) == sparse_output
if sparse_output:
# verify CSR assumption that indices and indptr have same dtype
assert got.indices.dtype == got.indptr.dtype
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert mlb.inverse_transform(got) == inverse
# With fit
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit(inp()).transform(inp())
assert issparse(got) == sparse_output
if sparse_output:
# verify CSR assumption that indices and indptr have same dtype
assert got.indices.dtype == got.indptr.dtype
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert mlb.inverse_transform(got) == inverse
with pytest.raises(ValueError):
mlb.inverse_transform(csr_matrix(np.array([[0, 1, 1],
[2, 0, 0],
[1, 1, 0]])))
def test_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: ({2, 3}, {1}, {1, 2}),
lambda: iter([iter((2, 3)), iter((1,)), {1, 2}]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for inp in inputs:
# With fit_transform
mlb = MultiLabelBinarizer()
got = mlb.fit_transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert mlb.inverse_transform(got) == inverse
# With fit
mlb = MultiLabelBinarizer()
got = mlb.fit(inp()).transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert mlb.inverse_transform(got) == inverse
def test_multilabel_binarizer_empty_sample():
mlb = MultiLabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
Y = np.array([[1, 0], [0, 1]])
w = 'unknown class(es) [0, 4] will be ignored'
matrix = assert_warns_message(UserWarning, w,
mlb.fit(y).transform, [[4, 1], [2, 0]])
assert_array_equal(matrix, Y)
Y = np.array([[1, 0, 0], [0, 1, 0]])
mlb = MultiLabelBinarizer(classes=[1, 2, 3])
matrix = assert_warns_message(UserWarning, w,
mlb.fit(y).transform, [[4, 1], [2, 0]])
assert_array_equal(matrix, Y)
def test_multilabel_binarizer_given_classes():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# fit().transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# ensure works with extra class
mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
assert_array_equal(mlb.fit_transform(inp),
np.hstack(([[0], [0], [0]], indicator_mat)))
assert_array_equal(mlb.classes_, [4, 1, 3, 2])
# ensure fit is no-op as iterable is not consumed
inp = iter(inp)
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
# ensure a ValueError is thrown if given duplicate classes
err_msg = "The classes argument contains duplicate classes. Remove " \
"these duplicates before passing them to MultiLabelBinarizer."
mlb = MultiLabelBinarizer(classes=[1, 3, 2, 3])
with pytest.raises(ValueError, match=err_msg):
mlb.fit(inp)
def test_multilabel_binarizer_multiple_calls():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
indicator_mat2 = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
# first call
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
# second call change class
mlb.classes = [1, 2, 3]
assert_array_equal(mlb.fit_transform(inp), indicator_mat2)
def test_multilabel_binarizer_same_length_sequence():
# Ensure sequences of the same length are not interpreted as a 2-d array
inp = [[1], [0], [2]]
indicator_mat = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
tuple_classes = _to_object_array([(1,), (2,), (3,)])
inputs = [
([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
for inp, classes in inputs:
# fit_transform()
mlb = MultiLabelBinarizer()
inp = np.array(inp, dtype=object)
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
indicator_mat_inv = np.array(mlb.inverse_transform(indicator_mat),
dtype=object)
assert_array_equal(indicator_mat_inv, inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
indicator_mat_inv = np.array(mlb.inverse_transform(indicator_mat),
dtype=object)
assert_array_equal(indicator_mat_inv, inp)
mlb = MultiLabelBinarizer()
with pytest.raises(TypeError):
mlb.fit_transform([({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
inp = [(1, 1, 1, 0)]
indicator_mat = np.array([[1, 1]])
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
inp = [(1, 1, 1, 0)]
mlb = MultiLabelBinarizer()
mlb.fit_transform(inp)
# Not binary
with pytest.raises(ValueError):
mlb.inverse_transform(np.array([[1, 3]]))
# The following binary cases are fine, however
mlb.inverse_transform(np.array([[0, 0]]))
mlb.inverse_transform(np.array([[1, 1]]))
mlb.inverse_transform(np.array([[1, 0]]))
# Wrong shape
with pytest.raises(ValueError):
mlb.inverse_transform(np.array([[1]]))
with pytest.raises(ValueError):
mlb.inverse_transform(np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
out = label_binarize([1, 6], classes=[1, 2, 4, 6])
expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
assert_array_equal(out, expected)
# Modified class order
out = label_binarize([1, 6], classes=[1, 6, 4, 2])
expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
assert_array_equal(out, expected)
out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1])
expected = np.array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0]])
assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
for sparse_output in [True, False]:
if ((pos_label == 0 or neg_label != 0) and sparse_output):
with pytest.raises(ValueError):
label_binarize(y, classes=classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
continue
# check label_binarize
binarized = label_binarize(y, classes=classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
assert_array_equal(toarray(binarized), expected)
assert issparse(binarized) == sparse_output
# check inverse
y_type = type_of_target(y)
if y_type == "multiclass":
inversed = _inverse_binarize_multiclass(binarized, classes=classes)
else:
inversed = _inverse_binarize_thresholding(binarized,
output_type=y_type,
classes=classes,
threshold=((neg_label +
pos_label) /
2.))
assert_array_equal(toarray(inversed), toarray(y))
# Check label binarizer
lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
binarized = lb.fit_transform(y)
assert_array_equal(toarray(binarized), expected)
assert issparse(binarized) == sparse_output
inverse_output = lb.inverse_transform(binarized)
assert_array_equal(toarray(inverse_output), toarray(y))
assert issparse(inverse_output) == issparse(y)
def test_label_binarize_binary():
y = [0, 1, 0]
classes = [0, 1]
pos_label = 2
neg_label = -1
expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
check_binarized_results(y, classes, pos_label, neg_label, expected)
# Binary case where sparse_output = True will not result in a ValueError
y = [0, 1, 0]
classes = [0, 1]
pos_label = 3
neg_label = 0
expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
check_binarized_results(y, classes, pos_label, neg_label, expected)
def test_label_binarize_multiclass():
y = [0, 1, 2]
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = 2 * np.eye(3)
check_binarized_results(y, classes, pos_label, neg_label, expected)
with pytest.raises(ValueError):
label_binarize(y, classes=classes, neg_label=-1, pos_label=pos_label,
sparse_output=True)
def test_label_binarize_multilabel():
y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = pos_label * y_ind
y_sparse = [sparse_matrix(y_ind)
for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix]]
for y in [y_ind] + y_sparse:
check_binarized_results(y, classes, pos_label, neg_label,
expected)
with pytest.raises(ValueError):
label_binarize(y, classes=classes, neg_label=-1, pos_label=pos_label,
sparse_output=True)
def test_invalid_input_label_binarize():
with pytest.raises(ValueError):
label_binarize([0, 2], classes=[0, 2], pos_label=0, neg_label=1)
with pytest.raises(ValueError, match="continuous target data is not "):
label_binarize([1.2, 2.7], classes=[0, 1])
with pytest.raises(ValueError, match="mismatch with the labels"):
label_binarize([[1, 3]], classes=[1, 2, 3])
def test_inverse_binarize_multiclass():
got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
[-1, 0, -1],
[0, 0, 0]]),
np.arange(3))
assert_array_equal(got, np.array([1, 1, 0]))
|
bsd-3-clause
|
meduz/scikit-learn
|
examples/decomposition/plot_incremental_pca.py
|
175
|
1974
|
"""
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a similar
projection of the data to PCA (up to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
iris = load_iris()
X = iris.data
y = iris.target
n_components = 2
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
colors = ['navy', 'turquoise', 'darkorange']
for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
plt.figure(figsize=(8, 8))
for color, i, target_name in zip(colors, [0, 1, 2], iris.target_names):
plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
color=color, lw=2, label=target_name)
if "Incremental" in title:
err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
plt.title(title + " of iris dataset\nMean absolute unsigned error "
"%.6f" % err)
else:
plt.title(title + " of iris dataset")
plt.legend(loc="best", shadow=False, scatterpoints=1)
plt.axis([-4, 4, -1.5, 1.5])
plt.show()
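# Hedged addition (not part of the original example): for data that truly does
# not fit in memory, IncrementalPCA can also be fed explicit mini-batches via
# partial_fit instead of fit_transform. The split into 10 chunks below is
# arbitrary and only meant to illustrate the streaming usage.
ipca_stream = IncrementalPCA(n_components=n_components)
for X_batch in np.array_split(X, 10):
    ipca_stream.partial_fit(X_batch)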
|
bsd-3-clause
|
boland1992/SeisSuite
|
seissuite/misc/convex.py
|
8
|
2925
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 3 08:48:56 2015
@author: boland
"""
import matplotlib.pyplot as plt
from matplotlib.path import Path
from scipy.spatial import ConvexHull
import numpy as np
class Convex:
"""
CLASS CURRENTLY NOT WORKING!
Class defined in order to create a convex hull around an array of points
and then perform functions on them e.g. produce K random points inside,
find N equidistant points within etc.
"""
def __init__(self, points):
# initialise points of interest
self.dots = points
# initialise polygon for potential convex hull with matplotlib Path
self.polygon = 0.
# initialise output points for points inside convex hull!
self.output = 0.
def convex_hull(self, point_set):
"""
Function to produce a convex hull object that surrounds
a set of points. The input must be of the type Nx2 matrix/
numpy array or equivalent. new_point shape is (2,1)
"""
return ConvexHull(point_set)
def poly_hull(self):
"""
Function that generates a matplotlib Path object from convex hull
nodes.
"""
hull = self.convex_hull(self.dots)
X, Y = self.dots[hull.vertices,0], self.dots[hull.vertices,1]
self.polygon = Path(np.column_stack((X, Y)))
return self.polygon
def in_poly_hull(self, point_set):
"""
Function that quickly returns (2,N) array from (2,M) array of
input points such that M >= N and N points are contained within
the self.polygon polygon.
"""
self.polygon = self.poly_hull()
points_in = self.polygon.contains_points(point_set)
self.output = point_set[points_in == True]
return self.output
def plot_hull(self, show_points=False):
"""
Function that plots the boundaries of a convex hull using
matplotlib.pyplot. Input hull must be of type:
scipy.spatial.qhull.ConvexHull
points input must be of the original coordinates.
"""
hull = self.convex_hull(self.dots)
plt.figure()
for simplex in hull.simplices:
plt.plot(self.dots[simplex,0], \
self.dots[simplex,1], 'k-')
if show_points:
plt.scatter(self.dots[:,0], \
self.dots[:,1], s=10,c='g')
plt.scatter(self.dots[:,0], \
self.dots[:,1], s=30,c='orange')
plt.show()
def rand_hull(hull, points, K):
"Generate K new random points contained within a convex hull"
minx, maxx = np.min(points[:,0]), np.max(points[:,0])
miny, maxy = np.min(points[:,1]), np.max(points[:,1])
X = abs(maxx - minx) * np.random.rand(10*K**2,1) + minx
Y = abs(maxy - miny) * np.random.rand(10*K**2,1) + miny
return np.column_stack((X,Y))
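# Hedged usage sketch (not part of the original module, which is flagged as not
# working): the names pts/candidates below are illustrative only. Note that
# rand_hull alone only samples the bounding box of the input points, so
# in_poly_hull is still needed to keep only the candidates inside the hull.
if __name__ == '__main__':
    pts = np.random.rand(100, 2)
    conv = Convex(pts)
    candidates = np.random.rand(500, 2)     # uniform points in the unit square
    inside = conv.in_poly_hull(candidates)  # subset of candidates inside the hull
    conv.plot_hull(show_points=True)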
|
gpl-3.0
|
phaustin/pyman
|
Book/chap8/Problems/FitPwrLaw.py
|
3
|
2545
|
# Routine FitPwrLaw
import numpy as np
import matplotlib.pyplot as plt
def LineFitWt(x, y, sig):
"""
Returns slope and y-intercept of weighted linear fit to
(x,y) data set.
Inputs: x and y data array and uncertainty array (unc)
for y data set.
Outputs: slope and y-intercept of best fit to data and
uncertainties of slope & y-intercept.
"""
sig2 = sig**2
norm = (1./sig2).sum()
xhat = (x/sig2).sum() / norm
yhat = (y/sig2).sum() / norm
slope = ((x-xhat)*y/sig2).sum()/((x-xhat)*x/sig2).sum()
yint = yhat - slope*xhat
sig2_slope = 1./((x-xhat)*x/sig2).sum()
sig2_yint = sig2_slope * (x*x/sig2).sum() / norm
return slope, yint, np.sqrt(sig2_slope), np.sqrt(sig2_yint)
def redchisq(x, y, dy, slope, yint):
    """Return the reduced chi-squared of the linear fit y = slope*x + yint."""
    chisq = (((y-yint-slope*x)/dy)**2).sum()
    return chisq/float(x.size-2)
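# Hedged sanity check (not part of the original routine): fit synthetic data
# with a known slope and intercept to verify LineFitWt. The names ending in
# _chk are illustrative only.
_rng = np.random.RandomState(0)
x_chk = np.linspace(1.0, 10.0, 50)
sig_chk = 0.2 * np.ones_like(x_chk)
y_chk = 3.0 * x_chk - 1.0 + _rng.normal(scale=sig_chk)
m_chk, b_chk, dm_chk, db_chk = LineFitWt(x_chk, y_chk, sig_chk)
# m_chk and b_chk should recover 3.0 and -1.0 within a few quoted uncertainties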
# Read data from data file
t, r, dr = np.loadtxt("aggdata.txt", skiprows=4, unpack=True)
########## Code to transform & fit data starts here ##########
# Transform data and parameters to linear form: Y = A*X + B
X = np.log(t)    # transform t data for fitting
Y = np.log(r)    # transform r data for fitting
dY = dr/r        # transform uncertainties for fitting
# Fit transformed data X, Y, dY to obtain fitting parameters
# A & B. Also returns uncertainties dB & dA in A & B
A, B, dA, dB = LineFitWt(X, Y, dY)
# Return reduced chi-squared
redchisqr = redchisq(X, Y, dY, A, B)
# Determine fitting parameters for the original power-law function
# r = r0 * t**d ...
p = A             # power-law exponent d (= slope A of the linear fit)
K = np.exp(B)     # prefactor r0 (= exp of the intercept B)
# ... and their uncertainties
dp = dA
dK = np.exp(B)*dB
###### Code to plot transformed data and fit starts here ######
# Create line corresponding to fit using fitting parameters
# Only two points are needed to specify a straight line
Xext = 0.05*(X.max()-X.min())
Xfit = np.array([X.min()-Xext, X.max()+Xext])
Yfit = A*Xfit + B # generates Y from X data &
# fitting function
plt.errorbar(X, Y, dY, fmt="gs", ms=3)
plt.plot(Xfit, Yfit, "k-", zorder=-1)
plt.title(r"$\mathrm{Fit\ to:}\ \ln\,r = d\, \ln\,t + \ln\,r_0$ or $Y = AX+B$")
plt.xlabel(r'$\ln\,t$', fontsize=16)
plt.ylabel(r'$\ln\,r$', fontsize=16)
plt.text(-2.7, 8.2, u"A = d = {0:0.2f} \xb1 {1:0.2f}".format(A, dA))
plt.text(-2.7, 8.0, u"B = ln r_0 = {0:0.1f} \xb1 {1:0.1f}".format(B, dB))
plt.text(-2.7, 7.8, u"r_0 = {0:0.1e} \xb1 {1:0.1e}".format(K, dK))
plt.text(-2.7, 7.6, "$\chi_r^2$ = {0:0.3f}".format(redchisqr))
plt.savefig("FitSemiLogWtsErrBars.pdf")  # save before show() so the file is written from the live figure
plt.show()
|
cc0-1.0
|