repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---
cybernet14/scikit-learn | examples/feature_stacker.py | 246 | 1906 | """
=================================================
Concatenating multiple feature extraction methods
=================================================
In many real-world examples, there are many ways to extract features from a
dataset. Often it is beneficial to combine several methods to obtain good
performance. This example shows how to use ``FeatureUnion`` to combine
features obtained by PCA and univariate selection.
Combining features using this transformer has the benefit that it allows
cross validation and grid searches over the whole process.
The combination used in this example is not particularly helpful on this
dataset and is only used to illustrate the usage of FeatureUnion.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
iris = load_iris()
X, y = iris.data, iris.target
# This dataset is way too high-dimensional. Better do PCA:
pca = PCA(n_components=2)
# Maybe some original features were good, too?
selection = SelectKBest(k=1)
# Build estimator from PCA and Univariate selection:
combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])
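# FeatureUnion fits each transformer on the same input and concatenates their
# outputs column-wise (here: the PCA components and the univariately selected feature)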
# Use combined features to transform dataset:
X_features = combined_features.fit(X, y).transform(X)
svm = SVC(kernel="linear")
# Do grid search over k, n_components and C:
pipeline = Pipeline([("features", combined_features), ("svm", svm)])
param_grid = dict(features__pca__n_components=[1, 2, 3],
features__univ_select__k=[1, 2],
svm__C=[0.1, 1, 10])
grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)
grid_search.fit(X, y)
print(grid_search.best_estimator_)
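# A possible follow-up (not part of the original example): the fitted grid
# search also exposes the winning parameter combination, e.g.
# print(grid_search.best_params_)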
| bsd-3-clause |
HeraclesHX/scikit-learn | benchmarks/bench_plot_neighbors.py | 287 | 6433 | """
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import pylab as pl
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
if dataset == 'dense':
np.random.seed(0)
return np.random.random((N, D))
elif dataset == 'digits':
X = datasets.load_digits().data
i = np.argsort(X[0])[::-1]
X = X[:, i]
return X[:N, :D]
else:
raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
Drange=2 ** np.arange(7),
krange=2 ** np.arange(10),
N=1000,
D=64,
k=5,
leaf_size=30,
dataset='digits'):
algorithms = ('kd_tree', 'brute', 'ball_tree')
fiducial_values = {'N': N,
'D': D,
'k': k}
#------------------------------------------------------------
# varying N
N_results_build = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
N_results_query = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
for i, NN in enumerate(Nrange):
print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
X = get_data(NN, D, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
N_results_build[algorithm][i] = (t1 - t0)
N_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying D
D_results_build = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
D_results_query = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
for i, DD in enumerate(Drange):
print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
X = get_data(N, DD, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=k,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
D_results_build[algorithm][i] = (t1 - t0)
D_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying k
k_results_build = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
k_results_query = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
X = get_data(N, D, dataset)
for i, kk in enumerate(krange):
print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
k_results_build[algorithm][i] = (t1 - t0)
k_results_query[algorithm][i] = (t2 - t1)
pl.figure(figsize=(8, 11))
for (sbplt, vals, quantity,
build_time, query_time) in [(311, Nrange, 'N',
N_results_build,
N_results_query),
(312, Drange, 'D',
D_results_build,
D_results_query),
(313, krange, 'k',
k_results_build,
k_results_query)]:
ax = pl.subplot(sbplt, yscale='log')
pl.grid(True)
tick_vals = []
tick_labels = []
bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
for alg in algorithms])
for i, alg in enumerate(algorithms):
xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
width = 0.8
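# red bars: construction time, drawn from the common log-scale baseline;
# blue bars: query time, stacked on top of the construction-time bars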
c_bar = pl.bar(xvals, build_time[alg] - bottom,
width, bottom, color='r')
q_bar = pl.bar(xvals, query_time[alg],
width, build_time[alg], color='b')
tick_vals += list(xvals + 0.5 * width)
tick_labels += ['%i' % val for val in vals]
pl.text((i + 0.02) / len(algorithms), 0.98, alg,
transform=ax.transAxes,
ha='left',
va='top',
bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
pl.ylabel('Time (s)')
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
for label in ax.get_xticklabels():
label.set_rotation(-90)
label.set_fontsize(10)
title_string = 'Varying %s' % quantity
descr_string = ''
for s in 'NDk':
if s == quantity:
pass
else:
descr_string += '%s = %i, ' % (s, fiducial_values[s])
descr_string = descr_string[:-2]
pl.text(1.01, 0.5, title_string,
transform=ax.transAxes, rotation=-90,
ha='left', va='center', fontsize=20)
pl.text(0.99, 0.5, descr_string,
transform=ax.transAxes, rotation=-90,
ha='right', va='center')
pl.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
pl.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
'upper right')
if __name__ == '__main__':
barplot_neighbors(dataset='digits')
barplot_neighbors(dataset='dense')
pl.show()
| bsd-3-clause |
mrcslws/htmresearch | projects/capybara/cla_vs_sdr_classifier/run_sdr_classifier.py | 9 | 8898 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy
import os
from nupic.algorithms.sdr_classifier import SDRClassifier
from nupic.algorithms.CLAClassifier import CLAClassifier
from nupic.encoders.sdrcategory import SDRCategoryEncoder
plt.ion()
plt.close('all')
mpl.rcParams['pdf.fonttype'] = 42
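# clip probabilities below 0.001 before taking the log, so that zero-probability
# predictions yield a large but finite negative log-likelihood instead of -inf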
def myLog(x):
x = 0.001 if x < 0.001 else x
return numpy.log(x)
def initializeEncoder(Nelements, seed):
# initialize the SDR category encoder
encoder = SDRCategoryEncoder(1024, 40,
categoryList=range(0, Nelements),
encoderSeed=seed)
return encoder
def initializeClassifiers(Nelements, encoder):
claClassifier = CLAClassifier(steps=[0])
sdrClassifier = SDRClassifier(steps=[0], alpha=0.1)
patternNZ = list(numpy.where(encoder.encode(Nelements - 1))[0])
classification = {'bucketIdx': Nelements - 1, 'actValue': Nelements - 1}
# feed in the pattern with the highest bucket index
claRetval = claClassifier.compute(0, patternNZ, classification,
learn=True, infer=True)
sdrRetval = sdrClassifier.compute(0, patternNZ, classification,
learn=True, infer=True)
return claClassifier, sdrClassifier
def runSimulation(encoder, cla, sdrClassifier,
noiseLevel=0.0, changeTaskAfter=-1, nstep=1000):
accuracyTrack = []
negLLTrack = []
for recordNum in xrange(nstep):
# use a different encoder to test continuous learning
if recordNum == changeTaskAfter:
encoder = initializeEncoder(encoder.ncategories - 1, seed=2)
inputSymbol = numpy.random.randint(encoder.ncategories - 1)
activation = encoder.encode(inputSymbol)
# add noise to the SDR to increase task difficulty
if noiseLevel > 0:
numMissBits = numpy.int(encoder.w * noiseLevel)
activeBits = numpy.where(activation)[0]
activation[activeBits[:numMissBits]] = 0
numRandBits = numpy.int(encoder.w * noiseLevel)
newBits = numpy.random.randint(encoder.n, size=(numRandBits,))
activation[newBits] = 1
patternNZ = list(numpy.where(activation)[0])
classification = {'bucketIdx': inputSymbol, 'actValue': inputSymbol}
claRetval = cla.compute(recordNum, patternNZ, classification,
learn=True, infer=True)
sdrRetval = sdrClassifier.compute(recordNum, patternNZ, classification,
learn=True, infer=True)
NNNegLL = myLog(sdrRetval[0][inputSymbol])
ClaNegLL = myLog(claRetval[0][inputSymbol])
NNBestPrediction = numpy.argmax(sdrRetval[0])
NNAccuracy = (NNBestPrediction == inputSymbol)
ClaBestPrediction = numpy.argmax(claRetval[0])
ClaAccuracy = (ClaBestPrediction == inputSymbol)
negLLTrack.append([ClaNegLL, NNNegLL])
accuracyTrack.append([int(ClaAccuracy), int(NNAccuracy)])
return (negLLTrack, accuracyTrack)
def runExperiment1():
"""
Run both classifiers on noise-free streams
:return:
"""
negLLTrackSum = 0
accuracyTrackSum = 0
Nrpt = 10
for rpt in range(Nrpt):
Nelements = 20
noiseLevel = 0.0
encoder = initializeEncoder(Nelements, seed=1)
cla, sdrClassifier = initializeClassifiers(Nelements, encoder)
(negLLTrack,
accuracyTrack) = runSimulation(encoder, cla, sdrClassifier, noiseLevel,
nstep=500)
negLLTrack = numpy.array(negLLTrack)
accuracyTrack = numpy.array(accuracyTrack).astype('float32')
negLLTrackSum = negLLTrackSum + negLLTrack
accuracyTrackSum = accuracyTrackSum + accuracyTrack
negLLTrackSum /= Nrpt
accuracyTrackSum /= Nrpt
plt.figure(1)
plt.subplot(2, 2, 1)
v = numpy.ones((5,)) / 5
plt.plot(numpy.convolve(negLLTrackSum[:, 1], v, 'valid'))
plt.plot(numpy.convolve(negLLTrackSum[:, 0], v, 'valid'), '--')
plt.ylim([-4, 0.1])
plt.ylabel(' Log-Likelihood')
plt.xlabel(' Iteration ')
plt.title(' Noise Level: ' + str(noiseLevel))
plt.legend(['SDR Classifier', 'CLA Classifier'], loc=4)
plt.subplot(2, 2, 2)
plt.plot(numpy.convolve(accuracyTrackSum[:, 1], v, 'valid'))
plt.plot(numpy.convolve(accuracyTrackSum[:, 0], v, 'valid'), '--')
plt.ylim([0, 1.05])
plt.ylabel(' Accuracy ')
plt.xlabel(' Iteration ')
plt.title(' Noise Level: ' + str(noiseLevel))
plt.legend(['SDR Classifier', 'CLA Classifier'], loc=4)
if not os.path.exists('results'):
os.makedirs('results')
plt.savefig(os.path.join('results', 'LLvsTraining.pdf'))
# prediction of one input element after training
patternNZ = list(numpy.where(encoder.encode(10))[0])
classification = {'bucketIdx': 10, 'actValue': 10}
numIterations = cla._learnIteration + 1
claRetval = cla.compute(numIterations, patternNZ,
classification, learn=False, infer=True)
sdrRetval = sdrClassifier.compute(numIterations, patternNZ,
classification, learn=False, infer=True)
plt.figure(3)
plt.plot(sdrRetval[0])
plt.plot(claRetval[0])
plt.xlabel('Possible Inputs')
plt.ylabel(' Predicted Probability')
plt.title(' Noise Level: ' + str(noiseLevel))
plt.legend(['SDR', 'CLA'])
if not os.path.exists('results'):
os.makedirs('results')
plt.savefig(os.path.join('results', 'ExamplePredictionAfterTraining.pdf'))
def runExperiment2():
"""
plot LL after training vs. noise level
"""
Nelements = 20
noiseLevelList = numpy.linspace(0, 1.0, num=21)
negLLCLA = []
negLLSDR = []
accuracyCLA = []
accuracySDR = []
for noiseLevel in noiseLevelList:
encoder = initializeEncoder(Nelements, seed=1)
claClassifier, sdrClassifier = initializeClassifiers(Nelements, encoder)
(negLLTrack, accuracyTrack) = runSimulation(
encoder, claClassifier, sdrClassifier, noiseLevel)
negLLTrack = numpy.array(negLLTrack)
accuracyTrack = numpy.array(accuracyTrack)
negLLCLA.append(numpy.mean(negLLTrack[-100:, 0]))
negLLSDR.append(numpy.mean(negLLTrack[-100:, 1]))
accuracyCLA.append(numpy.mean(accuracyTrack[-100:, 0]))
accuracySDR.append(numpy.mean(accuracyTrack[-100:, 1]))
noiseLevelList = noiseLevelList * 40
plt.figure(4)
plt.subplot(2, 2, 1)
plt.plot(noiseLevelList, negLLSDR, '-o')
plt.plot(noiseLevelList, negLLCLA, '-s')
plt.xlabel(' Noise Level (# random bits) ')
plt.ylabel(' Log-likelihood')
plt.legend(['SDR Classifier', 'CLA Classifier'], loc=3)
plt.subplot(2, 2, 2)
plt.plot(noiseLevelList, accuracySDR, '-o')
plt.plot(noiseLevelList, accuracyCLA, '-s')
plt.xlabel(' Noise Level (# random bits) ')
plt.ylabel(' Accuracy ')
plt.legend(['SDR Classifier', 'CLA Classifier'], loc=3)
if not os.path.exists('results'):
os.makedirs('results')
plt.savefig(os.path.join('results', 'LLvsNoise.pdf'))
def runExperiment3():
"""
Change task at iteration=500, test continuous learning
:return:
"""
Nelements = 20
noiseLevel = 0.0
encoder = initializeEncoder(Nelements, seed=1)
cla, sdrClassifier = initializeClassifiers(Nelements, encoder)
(negLLTrack,
accuracyTrack) = runSimulation(encoder, cla, sdrClassifier,
noiseLevel, changeTaskAfter=500)
plt.figure(5)
negLLTrack = numpy.array(negLLTrack)
v = numpy.ones((5,)) / 5
plt.subplot(2, 2, 1)
plt.plot(numpy.convolve(negLLTrack[:, 1], v, 'valid'))
plt.plot(numpy.convolve(negLLTrack[:, 0], v, 'valid'))
plt.ylim([-4, .1])
plt.ylabel(' Log-Likelihood')
plt.xlabel(' Iteration ')
plt.title(' Noise Level: ' + str(noiseLevel))
plt.legend(['SDR Classifier', 'CLA Classifier'], loc=4)
if not os.path.exists('results'):
os.makedirs('results')
plt.savefig(os.path.join('results', 'LLvsTraining_ChangeAt500.pdf'))
if __name__ == "__main__":
# Example prediction with noise-free streams
runExperiment1()
# LL vs Noise
runExperiment2()
# Continuous learning task
runExperiment3()
| agpl-3.0 |
rtavenar/tslearn | tslearn/docs/examples/metrics/plot_dtw.py | 1 | 4283 | # -*- coding: utf-8 -*-
"""
DTW computation
===============
This example illustrates DTW computation between time series and plots the
optimal alignment path [1].
The image represents the cost matrix, that is, the squared Euclidean distance for
each time point between both time series, which are represented
at the left and at the top of the cost matrix.
The optimal path, that is the path that minimizes the total cost to go from
the first time point to the last one, is represented in white on the image.
[1] H. Sakoe and S. Chiba, "Dynamic programming algorithm optimization
for spoken word recognition". IEEE Transactions on Acoustics, Speech, and
Signal Processing, 26(1), 43-49 (1978).
"""
# Author: Romain Tavenard
# License: BSD 3 clause
import numpy
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
from tslearn.generators import random_walks
from tslearn.preprocessing import TimeSeriesScalerMeanVariance
from tslearn import metrics
numpy.random.seed(0)
s_x = numpy.array(
[-0.790, -0.765, -0.734, -0.700, -0.668, -0.639, -0.612, -0.587, -0.564,
-0.544, -0.529, -0.518, -0.509, -0.502, -0.494, -0.488, -0.482, -0.475,
-0.472, -0.470, -0.465, -0.464, -0.461, -0.458, -0.459, -0.460, -0.459,
-0.458, -0.448, -0.431, -0.408, -0.375, -0.333, -0.277, -0.196, -0.090,
0.047, 0.220, 0.426, 0.671, 0.962, 1.300, 1.683, 2.096, 2.510, 2.895,
3.219, 3.463, 3.621, 3.700, 3.713, 3.677, 3.606, 3.510, 3.400, 3.280,
3.158, 3.038, 2.919, 2.801, 2.676, 2.538, 2.382, 2.206, 2.016, 1.821,
1.627, 1.439, 1.260, 1.085, 0.917, 0.758, 0.608, 0.476, 0.361, 0.259,
0.173, 0.096, 0.027, -0.032, -0.087, -0.137, -0.179, -0.221, -0.260,
-0.293, -0.328, -0.359, -0.385, -0.413, -0.437, -0.458, -0.480, -0.498,
-0.512, -0.526, -0.536, -0.544, -0.552, -0.556, -0.561, -0.565, -0.568,
-0.570, -0.570, -0.566, -0.560, -0.549, -0.532, -0.510, -0.480, -0.443,
-0.402, -0.357, -0.308, -0.256, -0.200, -0.139, -0.073, -0.003, 0.066,
0.131, 0.186, 0.229, 0.259, 0.276, 0.280, 0.272, 0.256, 0.234, 0.209,
0.186, 0.162, 0.139, 0.112, 0.081, 0.046, 0.008, -0.032, -0.071, -0.110,
-0.147, -0.180, -0.210, -0.235, -0.256, -0.275, -0.292, -0.307, -0.320,
-0.332, -0.344, -0.355, -0.363, -0.367, -0.364, -0.351, -0.330, -0.299,
-0.260, -0.217, -0.172, -0.128, -0.091, -0.060, -0.036, -0.022, -0.016,
-0.020, -0.037, -0.065, -0.104, -0.151, -0.201, -0.253, -0.302, -0.347,
-0.388, -0.426, -0.460, -0.491, -0.517, -0.539, -0.558, -0.575, -0.588,
-0.600, -0.606, -0.607, -0.604, -0.598, -0.589, -0.577, -0.558, -0.531,
-0.496, -0.454, -0.410, -0.364, -0.318, -0.276, -0.237, -0.203, -0.176,
-0.157, -0.145, -0.142, -0.145, -0.154, -0.168, -0.185, -0.206, -0.230,
-0.256, -0.286, -0.318, -0.351, -0.383, -0.414, -0.442, -0.467, -0.489,
-0.508, -0.523, -0.535, -0.544, -0.552, -0.557, -0.560, -0.560, -0.557,
-0.551, -0.542, -0.531, -0.519, -0.507, -0.494, -0.484, -0.476, -0.469,
-0.463, -0.456, -0.449, -0.442, -0.435, -0.431, -0.429, -0.430, -0.435,
-0.442, -0.452, -0.465, -0.479, -0.493, -0.506, -0.517, -0.526, -0.535,
-0.548, -0.567, -0.592, -0.622, -0.655, -0.690, -0.728, -0.764, -0.795,
-0.815, -0.823, -0.821])
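# build two toy series from the template above: s_y1 repeats the pattern twice,
# s_y2 appends a time-reversed copy, so only their second halves differ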
s_y1 = numpy.concatenate((s_x, s_x)).reshape((-1, 1))
s_y2 = numpy.concatenate((s_x, s_x[::-1])).reshape((-1, 1))
sz = s_y1.shape[0]
path, sim = metrics.dtw_path(s_y1, s_y2)
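# dtw_path returns the optimal alignment as a list of (i, j) index pairs,
# together with the corresponding DTW score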
plt.figure(1, figsize=(8, 8))
# definitions for the axes
left, bottom = 0.01, 0.1
w_ts = h_ts = 0.2
left_h = left + w_ts + 0.02
width = height = 0.65
bottom_h = bottom + height + 0.02
rect_s_y = [left, bottom, w_ts, height]
rect_gram = [left_h, bottom, width, height]
rect_s_x = [left_h, bottom_h, width, h_ts]
ax_gram = plt.axes(rect_gram)
ax_s_x = plt.axes(rect_s_x)
ax_s_y = plt.axes(rect_s_y)
mat = cdist(s_y1, s_y2)
ax_gram.imshow(mat, origin='lower')
ax_gram.axis("off")
ax_gram.autoscale(False)
ax_gram.plot([j for (i, j) in path], [i for (i, j) in path], "w-",
linewidth=3.)
ax_s_x.plot(numpy.arange(sz), s_y2, "b-", linewidth=3.)
ax_s_x.axis("off")
ax_s_x.set_xlim((0, sz - 1))
ax_s_y.plot(- s_y1, numpy.arange(sz), "b-", linewidth=3.)
ax_s_y.axis("off")
ax_s_y.set_ylim((0, sz - 1))
plt.tight_layout()
plt.show()
| bsd-2-clause |
mindriot101/bokeh | examples/plotting/file/unemployment.py | 5 | 1905 | from math import pi
import pandas as pd
from bokeh.io import show
from bokeh.models import LinearColorMapper, BasicTicker, PrintfTickFormatter, ColorBar
from bokeh.plotting import figure
from bokeh.sampledata.unemployment1948 import data
data['Year'] = data['Year'].astype(str)
data = data.set_index('Year')
data.drop('Annual', axis=1, inplace=True)
data.columns.name = 'Month'
years = list(data.index)
months = list(data.columns)
# reshape to 1D array of rates with a month and year for each row.
df = pd.DataFrame(data.stack(), columns=['rate']).reset_index()
# this is the colormap from the original NYTimes plot
colors = ["#75968f", "#a5bab7", "#c9d9d3", "#e2e2e2", "#dfccce", "#ddb7b1", "#cc7878", "#933b41", "#550b1d"]
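# map each unemployment rate linearly onto the 9-color palette above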
mapper = LinearColorMapper(palette=colors, low=df.rate.min(), high=df.rate.max())
TOOLS = "hover,save,pan,box_zoom,reset,wheel_zoom"
p = figure(title="US Unemployment ({0} - {1})".format(years[0], years[-1]),
x_range=years, y_range=list(reversed(months)),
x_axis_location="above", plot_width=900, plot_height=400,
tools=TOOLS, toolbar_location='below',
tooltips=[('date', '@Month @Year'), ('rate', '@rate%')])
p.grid.grid_line_color = None
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.axis.major_label_text_font_size = "5pt"
p.axis.major_label_standoff = 0
p.xaxis.major_label_orientation = pi / 3
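# one rectangle per (Year, Month) cell, filled according to the color-mapped rate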
p.rect(x="Year", y="Month", width=1, height=1,
source=df,
fill_color={'field': 'rate', 'transform': mapper},
line_color=None)
color_bar = ColorBar(color_mapper=mapper, major_label_text_font_size="5pt",
ticker=BasicTicker(desired_num_ticks=len(colors)),
formatter=PrintfTickFormatter(format="%d%%"),
label_standoff=6, border_line_color=None, location=(0, 0))
p.add_layout(color_bar, 'right')
show(p) # show the plot
| bsd-3-clause |
JoshuaW1990/bus_arrival_prediction | implementation/feature_selection.py | 1 | 12422 | """
Feature selection:
compare the MSE results obtained with different feature lists
"""
# import the modules
import pandas as pd
import os
from sklearn.model_selection import train_test_split
from sklearn import linear_model, svm, neural_network, preprocessing
from sklearn.metrics import mean_squared_error as MSE
import json
import matplotlib.pyplot as plt
import GPy
def preprocess_dataset(origin_dataset):
"""
Preprocess the dataset for obtaining the input matrix
:param origin_dataset: original dataset read from the dataset table
:return: the preprocessed dataset which only extracts the necessary information
"""
# preprocess to obtain the dataset
full_dataset = pd.DataFrame()
full_dataset['weather'] = origin_dataset['weather']
full_dataset['rush_hour'] = origin_dataset['rush_hour']
full_dataset['baseline_result'] = origin_dataset['baseline_result']
full_dataset['ratio_baseline'] = origin_dataset['actual_arrival_time'] / origin_dataset['baseline_result']
full_dataset['ratio_current_trip'] = origin_dataset['ratio_current_trip']
full_dataset['ratio_prev_trip'] = origin_dataset['ratio_prev_trip']
full_dataset['ratio_prev_seg'] = origin_dataset['actual_arrival_time'] / origin_dataset['prev_arrival_time']
full_dataset['prev_arrival_time'] = origin_dataset['prev_arrival_time']
full_dataset['actual_arrival_time'] = origin_dataset['actual_arrival_time']
return full_dataset
def split_dataset(dataset, feature_list):
"""
Split the dataset into training set and test set
:param dataset: preprocessed dataset which only contains the necessary information
:param feature_list: the list of features
:return: list of matrix for training set and test set
"""
X = dataset.as_matrix(columns=feature_list)
y = dataset.as_matrix(columns=['ratio_baseline', 'baseline_result', 'actual_arrival_time'])
# normalization
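# note: preprocessing.normalize rescales each sample (row) to unit L2 norm,
# which is not the same as standardizing each feature column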
X_normalized = preprocessing.normalize(X, norm='l2')
# split the dataset
X_train, X_test, output_train, output_test = train_test_split(X_normalized, y, test_size=0.33, random_state=42)
output_train = output_train.transpose()
output_test = output_test.transpose()
return X_train, X_test, output_train, output_test
def generate_ratio_result(X_train, X_test, y_train, y_test):
"""
Predict the ratio: true_arrival_time/baseline_arrival_time
:param X_train: the features of the training set
:param X_test: the features of the test set
:param y_train: the output of the training set
:param y_test: the output of the test set
:return: dataframe of the predicted result under different models
"""
# generate the result for random samples
ratio_result = pd.DataFrame(y_test, columns=['ratio_baseline'])
print 'linear regression'
model1 = linear_model.LinearRegression()
model1.fit(X_train, y_train)
y_pred = model1.predict(X_test)
ratio_result['linear_regression'] = y_pred
print 'SVM'
model2 = svm.SVR()
model2.fit(X_train, y_train)
y_pred = model2.predict(X_test)
ratio_result['SVM'] = y_pred
print 'NN'
model3 = neural_network.MLPRegressor(solver='lbfgs', max_iter=1000, learning_rate_init=0.005)
model3.fit(X_train, y_train)
y_pred = model3.predict(X_test)
ratio_result['NN'] = y_pred
print "Gaussian Process"
m_full = GPy.models.SparseGPRegression(X_train, y_train.reshape(len(y_train), 1))
m_full.optimize('bfgs')
y_pred, y_var = m_full.predict(X_test)
ratio_result['GP'] = y_pred
return ratio_result
def check_performance(output_test, ratio_result):
"""
Convert the ratio result into actual values and calculate the MSE for the ratios and actual values
:param output_test: the list of information for each predicted output, including the predicted result from the baseline algorithm and the actual arrival time; these records are necessary to assess the performance of the predicted result
:param ratio_result: the predicted result from each model
:return: the predicted arrival time, predicted ratio and the corresponding MSEs
"""
# process the result to obtain the pred arrival time with different models
time_result = pd.DataFrame(output_test[2], columns=['actual'])
time_result['baseline'] = output_test[1]
time_result['linear_regression'] = time_result['baseline'] * ratio_result['linear_regression']
time_result['SVM'] = time_result['baseline'] * ratio_result['SVM']
time_result['NN'] = time_result['baseline'] * ratio_result['NN']
time_result['GP'] = time_result['baseline'] * ratio_result['GP']
# calculate the MSE of the arrival time
columns = time_result.columns
mse_time = dict()
for column in columns:
if column == 'actual':
continue
mse_time[column] = MSE(time_result['actual'], time_result[column])
# process the result to obtain the ratio(actual_arrival_time / pred_arrival_time)
ratio_result['linear_regression'] = ratio_result['ratio_baseline'] / ratio_result['linear_regression']
ratio_result['SVM'] = ratio_result['ratio_baseline'] / ratio_result['SVM']
ratio_result['NN'] = ratio_result['ratio_baseline'] / ratio_result['NN']
ratio_result['GP'] = ratio_result['ratio_baseline'] / ratio_result['GP']
# calculate the MSE of ratio
columns = ratio_result.columns
true_ratio = [1.0] * len(ratio_result)
mse_ratio = dict()
for column in columns:
mse_ratio[column] = MSE(true_ratio, ratio_result[column])
return time_result, mse_time, ratio_result, mse_ratio
def export_files(time_result, mse_time, ratio_result, mse_ratio, feature, feature_list, save_path):
"""
Export the results: ratios, actual values, MSEs
:param time_result: the predicted arrival time under different models
:param mse_time: the corresponding MSE for time_result
:param ratio_result: the predicted ratio under different models
:param mse_ratio: the corresponding MSE for ratio_result
:param feature: the feature need to be removed
:param feature_list: the list of remained features
:param save_path: the path to export the files
:return: None
"""
if not os.path.exists(save_path):
os.mkdir(save_path)
path = save_path + str(len(feature_list)) + '/' + feature + '/'
if not os.path.exists(save_path + str(len(feature_list)) + '/'):
os.mkdir(save_path + str(len(feature_list)) + '/')
print "prepare to export file: ", path
if not os.path.exists(path):
os.mkdir(path)
with open(path + 'descrip.txt', 'w') as f:
f.write(str(feature_list))
# export figures
columns = time_result.columns
for column in columns:
if column == 'actual':
continue
filename = column + '.png'
figure = time_result.plot(kind='scatter', y=column, x='actual', xlim=(0, 6000), ylim=(0, 6000))
fig = figure.get_figure()
fig.savefig(path + filename)
plt.close(fig)
# export mse files
with open(path + 'mse_ratio.json', 'w') as f:
json.dump(mse_ratio, f)
with open(path + 'mse_time.json', 'w') as f:
json.dump(mse_time, f)
# export the csv file
time_result.to_csv(path + 'time_result.csv')
ratio_result.to_csv(path + 'ratio_result.csv')
#################################################################################################################
# main function #
#################################################################################################################
def run_feature_selection(dataset, tablename=None, save_path=None, engine=None):
"""
run an incomplete feature selection for the models
:param dataset: the dataframe of the dataset table
:param tablename: the name used for the exported comparison file/table
:param save_path: the path to export result
:param engine: the database connector
:return: the dataframe of the feature selection under different models
"""
plt.style.use('ggplot')
dataset.reset_index(inplace=True)
full_dataset = preprocess_dataset(dataset)
features = ['weather', 'rush_hour', 'baseline_result', 'ratio_current_trip', 'ratio_prev_trip', 'prev_arrival_time']
mse_compare = pd.DataFrame(
columns=['feature_removed', 'time_baseline', 'time_linear_regression', 'time_SVM', 'time_NN', 'time_GP',
'ratio_baseline', 'ratio_linear_regression', 'ratio_SVM', 'ratio_NN', 'ratio_GP'])
# complete features
X_train, X_test, output_train, output_test = split_dataset(full_dataset, features)
y_train = output_train[0]
y_test = output_test[0]
ratio_result = generate_ratio_result(X_train, X_test, y_train, y_test)
time_result, mse_time, ratio_result, mse_ratio = check_performance(output_test, ratio_result)
removed_features = ['None']
if save_path is not None:
export_files(time_result, mse_time, ratio_result, mse_ratio, 'AND'.join(removed_features), features, save_path)
mse_compare.loc[len(mse_compare)] = ['AND'.join(removed_features), mse_time['baseline'],
mse_time['linear_regression'], mse_time['SVM'], mse_time['NN'], mse_time['GP'],
mse_ratio['ratio_baseline'], mse_ratio['linear_regression'], mse_ratio['SVM'],
mse_ratio['NN'], mse_ratio['GP']]
# remove one feature
for i in xrange(len(features)):
feature1 = features[i]
tmp_features = list(features)
tmp_features.remove(feature1)
X_train, X_test, output_train, output_test = split_dataset(full_dataset, tmp_features)
y_train = output_train[0]
y_test = output_test[0]
ratio_result = generate_ratio_result(X_train, X_test, y_train, y_test)
time_result, mse_time, ratio_result, mse_ratio = check_performance(output_test, ratio_result)
removed_features = [feature1]
if save_path is not None:
export_files(time_result, mse_time, ratio_result, mse_ratio, 'AND'.join(removed_features), tmp_features, save_path)
mse_compare.loc[len(mse_compare)] = ['AND'.join(removed_features), mse_time['baseline'],
mse_time['linear_regression'], mse_time['SVM'], mse_time['NN'],
mse_time['GP'], mse_ratio['ratio_baseline'],
mse_ratio['linear_regression'], mse_ratio['SVM'], mse_ratio['NN'],
mse_ratio['GP']]
# remove two features
for i in xrange(len(features)):
for j in xrange(i + 1, len(features)):
feature1 = features[i]
feature2 = features[j]
tmp_features = list(features)
tmp_features.remove(feature1)
tmp_features.remove(feature2)
print tmp_features
X_train, X_test, output_train, output_test = split_dataset(full_dataset, tmp_features)
y_train = output_train[0]
y_test = output_test[0]
ratio_result = generate_ratio_result(X_train, X_test, y_train, y_test)
time_result, mse_time, ratio_result, mse_ratio = check_performance(output_test, ratio_result)
removed_features = [feature1, feature2]
if save_path is not None:
export_files(time_result, mse_time, ratio_result, mse_ratio, 'AND'.join(removed_features), tmp_features, save_path)
mse_compare.loc[len(mse_compare)] = ['AND'.join(removed_features), mse_time['baseline'],
mse_time['linear_regression'], mse_time['SVM'], mse_time['NN'],
mse_time['GP'], mse_ratio['ratio_baseline'],
mse_ratio['linear_regression'], mse_ratio['SVM'], mse_ratio['NN'],
mse_ratio['GP']]
if save_path is not None:
if not os.path.exists(save_path):
os.mkdir(save_path)
mse_compare.to_csv(save_path + tablename + '.csv')
if engine is not None:
mse_compare.to_sql(name=tablename, con=engine, if_exists='replace', index_label='id')
return mse_compare
| mit |
sabersf/Botnets | Sector_pred.py | 1 | 11255 | from __future__ import print_function
__author__ = 'Saber Shokat Fadaee'
#from gensim import corpora, models, similarities
#from gensim.models.doc2vec import TaggedDocument, LabeledSentence, Doc2Vec
#import gensim
from sklearn import manifold, datasets
import numpy as np
from itertools import chain
import multiprocessing
import csv
import matplotlib as ml
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import re
from matplotlib.backends.backend_pdf import PdfPages
import random
import numpy as np
import lda
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
from sklearn.utils.extmath import *
from sklearn.metrics import consensus_score
import operator
storage = {}
i = 1.0
EID_set = set()
botnet_set = set()
event_set = set()
drop_ent = []
file1 = open('drop_ent.txt')
for line in file1:
drop_ent.append(line.strip())
file1.close()
file1 = open('EID.txt')
for line in file1:
EID = line.strip()
EID_set.add(EID)
file1.close()
file1= open("botnets.txt")
for line in file1:
botnet = line.strip()
botnet_set.add(botnet)
file1.close()
count = np.loadtxt("count.txt")
botnet_family = []
file1= open("bot_relations.txt")
for line in file1:
botnet_family.append(line.strip().split())
file1.close()
#Plus one for the unidentified classes
num_classes = len(botnet_family) + 1
EID_set = sorted(EID_set)
botnet_set = sorted(botnet_set)
event_set = sorted(event_set)
#Set colors to each category
def sec_to_col(argument):
switcher = {
'Aerospace/Defense': 'aqua',
'Business Services': 'blueviolet',
'Consumer Goods': 'brown',
'Education': 'coral',
'Energy/Resources': 'crimson',
'Engineering': 'darkgreen',
'Finance': 'gold',
'Food Production': 'green',
'Government/Politics': 'lime',
'Healthcare/Wellness': 'magenta',
'Insurance': 'mintcream',
'Legal': 'olive',
'Manufacturing': 'orchid',
'Media/Entertainment': 'peru',
'Nonprofit/NGO': 'purple',
'Real Estate': 'red',
'Retail': 'skyblue',
'Technology': 'silver',
'Telecommunications': 'tomato',
'Tourism/Hospitality': 'peachpuff',
'Transportation': 'rosybrown',
'Unknown': 'dimgray',
'Utilities': 'royalblue',
}
return switcher.get(argument, "yellow")
#Set color to the different sizes
def size_to_col(argument):
switcher = {
'0-100': 'red',
'100-1000': 'blue',
'1000-10000': 'brown',
'10000-50000': 'green',
'50000+': 'gold',
'Unknown': 'lime',
}
return switcher.get(argument, "yellow")
# Assigns the topics to the documents in corpus
col = []
col_size = []
sector = {}
count_range = {}
#Adding extra information
with open('extra.csv', 'rb' ) as theFile:
reader = csv.DictReader( theFile )
for line in reader:
ind = int(line[''])
eid = line['entity_id_hash']
sec = line['industry_sector']
cnt = line['employee_count_range']
sector[eid] = sec
count_range[eid] = cnt
#Set numbers to each category
def sec_to_num(argument):
switcher = {
'Aerospace/Defense': 0,
'Business Services': 1,
'Consumer Goods': 2,
'Education': 3,
'Energy/Resources': 4,
'Engineering': 5,
'Finance': 6,
'Food Production': 7,
'Government/Politics': 8,
'Healthcare/Wellness': 9,
'Insurance': 10,
'Legal': 11,
'Manufacturing': 12,
'Media/Entertainment': 13,
'Nonprofit/NGO': 14,
'Real Estate': 15,
'Retail': 16,
'Technology': 17,
'Telecommunications': 18,
'Tourism/Hospitality': 19,
'Transportation': 20,
'Unknown': 21,
'Utilities': 22,
}
return switcher.get(argument, 23)
#Set numbers to each size
def size_to_num(argument):
switcher = {
'0-100': 50,
'100-1000': 500,
'1000-10000': 5000,
'10000-50000': 50000,
'50000+': 100000,
'Unknown': 1,
}
return switcher.get(argument, 6)
#Set numbers to each size
def size_to_category(argument):
switcher = {
'0-100': 1,
'100-1000': 2,
'1000-10000': 3,
'10000-50000': 4,
'50000+': 5,
'Unknown': 0,
}
return switcher.get(argument, 6)
#Set category to each number
def num_to_sec(argument):
switcher = {
0:'Aerospace/Defense',
1:'Business Services',
2:'Consumer Goods',
3:'Education',
4:'Energy/Resources',
5:'Engineering',
6:'Finance',
7:'Food Production',
8:'Government/Politics',
9:'Healthcare/Wellness',
10:'Insurance',
11:'Legal',
12:'Manufacturing',
13:'Media/Entertainment',
14:'Nonprofit/NGO',
15:'Real Estate',
16:'Retail',
17:'Technology',
18:'Telecommunications',
19:'Tourism/Hospitality',
20:'Transportation',
21:'Unknown',
22:'Utilities',
}
return switcher.get(argument,23)
#Set numbers to each size
def num_to_size(argument):
switcher = {
0:'0-100',
1:'100-1000',
2:'1000-10000',
3:'10000-50000',
4:'50000+',
5:'Unknown',
}
return switcher.get(argument, 6)
# In[36]:
def in_list(item,L):
for i in L:
if item in i:
return L.index(i)
return num_classes - 1
def bot_to_vector(bot):
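# one-hot encoding of botnet-family membership; botnets not found in any
# family fall into the last ("unidentified") slot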
output = [0] * num_classes
output[in_list(bot, botnet_family)] = 1
return output
def included_entry(entry_name):
#if sector[entry_name] == 'Education':
# return False
#if sector[entry_name] == 'Technology':
# return False
#if sector[entry_name] == 'Tourism/Hospitality':
# return False
#if sector[entry_name] == 'Telecommunications':
# return False
#if sector[entry_name] == 'Unknown':
# return False
return True
def sectors_count(botnet_group):
sectors_count = [0]*23
res = dict()
for i in range(len(EID_set_new)):
if count_new[botnet_group,i] > 0:
sectors_count[sec_to_num(sector[EID_set_new[i]])] += count_new[botnet_group,i]
for i in range(23):
res[num_to_sec(i)] = sectors_count[i]
return res
def sectors_count_botnet(bot):
sectors_count = [0]*23
for i in range(len(EID_set_new)):
if count_new1[bot,i] > 0:
sectors_count[sec_to_num(sector[EID_set_new[i]])] += count_new1[bot,i]
return sectors_count
print(sum(included_entry(entity) for entity in EID_set))
#Normalizing the count matrix by dividing the attacks on each entity by its size
#for i in range(207):
# for j in range(5916):
# count[i,j] = (count[i,j] + 0.0) / (0.0 + size_to_num(count_range[EID_set[j]]))
#index = 2899
index = 4475
count_new1 = np.zeros((207,index))
#Build a new count matrix excluding the unwanted sectors
index = 0
EID_set_new = []
for i in range(len(EID_set)):
if included_entry(EID_set[i]) and (EID_set[i] not in drop_ent):
count_new1[:,index] = count[:,i]
EID_set_new.append(EID_set[i])
index += 1
print("Total number of entities: %d"%index)
count_new = np.zeros((num_classes,index))
#for i in range(len(botnet_set)):
# count_new[in_list(botnet_set[i], botnet_family) ,:] += count_new1[i,:]
for i in range(len(botnet_set)):
count_new[in_list(botnet_set[i], botnet_family) ,:] += count_new1[i,:]
print(count_new.shape)
sum_count_new = 0
for i in range(num_classes):
sum_count_new += sum(count_new[i,:])
print("Total number of attacks by botnets on the new entities : %d"%sum_count_new)
# In[ ]:
#Draw plots of bot-family sector
def draw_plots():
for i in range(num_classes-1):
x = range(23)
y = sectors_count(i).values()
labels = sectors_count(i).keys()
plt.figure(figsize=(16,18))
plt.plot(x, y, 'r-')
plt.title(("Group: %d. Contains botnets like: %s %s")%(i+1,botnet_family[i][0],botnet_family[i][1]))
plt.xticks(x, labels, rotation='vertical')
plt.savefig("Group_%d.png"%(i+1))
plt.close()
# In[37]:
count_new.shape
# In[43]:
#Create test, train data sets for deep net input/output
from sklearn.cross_validation import train_test_split
count_sec = [0]* 23
input1 = []
output = []
#18 is the number of sectors that have any elements
#30 is the min that all sectors can get
while sum(count_sec) < 23*30:
i = random.randint(0, len(EID_set_new) - 1)
if count_sec[sec_to_num(sector[EID_set_new[i]])] >= 30:
continue
else:
input1.append(count_new[:,i])
sec_to_vec = [0]*23
sec_to_vec[sec_to_num(sector[EID_set_new[i]])] = 1
count_sec[sec_to_num(sector[EID_set_new[i]])] += 1
output.append(sec_to_vec)
print(len(input1), len(input1[0]))
print(len(output), len(output[0]))
# In[ ]:
# In[44]:
print(count_sec)
#Randomizing the input
inp = np.array(input1)
out = np.array(output)
X_train, X_test, Y_train, Y_test = train_test_split(inp, out, test_size=0.05, random_state=42)
print(len(X_train), len(X_test), len(Y_train), len(Y_test) )
# In[31]:
np.random.seed(1337) # for reproducibility
import theano
#import theano.sandbox.cuda
#theano.sandbox.cuda.use("gpu0")
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils import np_utils
#training the sequential model
model = Sequential()
model.add(Dense(32, input_shape=(20,)))
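# NB: the input dimension (20) is assumed here to equal num_classes, i.e. the
# length of the per-entity botnet-family count vectors used as network inputs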
model.add(Activation('tanh'))
model.add(Dropout(0.05))
#model.add(Dense(128))
#model.add(Activation('tanh'))
#model.add(Dropout(0.05))
#model.add(Dense(32))
#model.add(Activation('tanh'))
#model.add(Dropout(0.05))
model.add(Dense(64))
model.add(Activation('tanh'))
model.add(Dropout(0.05))
model.add(Dense(23))
model.add(Activation('softmax'))
model.summary()
batch_size = 16
nb_classes = 23
nb_epoch = 1000
try:
#target = open("NN_out.txt", 'w')
model.compile(loss='categorical_crossentropy', optimizer=SGD(), class_mode="categorical")
history = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1, validation_data=(X_test, Y_test))
except Exception as e:
print(str(e))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score)
p = model.predict(X_test)
yy = np.argmax(p, axis=1)
yyy = np.argmax(Y_test, axis=1)
a = np.equal(yy, yyy)
test_acc = ( 100.0 * (0.0 + sum(a)) / (len(a) + 0.0 ))
p = model.predict(X_train)
yy = np.argmax(p, axis=1)
yyy = np.argmax(Y_train, axis=1)
a = np.equal(yy, yyy)
train_acc = ( 100.0 * (0.0 + sum(a)) / (len(a) + 0.0 ))
print("NB_EPOCH : " , str(nb_epoch) , " Score: " , str(score) , " test accuracy: " , str(test_acc) , " Train accuracy: " , str(train_acc) + "\n")
#target.close()
# In[ ]:
print(yy)
# In[ ]:
print(yyy)
# In[ ]:
X_train[0]
# In[ ]:
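# top-3 accuracy: count how often the true sector is among the three
# highest-probability predictions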
ans = 0.0
p = model.predict(X_test)
yy = np.argmax(p, axis=1)
yyy = np.argmax(Y_test, axis=1)
for i in range(len(Y_test)):
pred = p[i].argsort()[-3:][::-1]
if np.argmax(Y_test[i]) in pred:
ans += 1.0
print("Top 3 prediction precision: %.2f"%(100.0*ans / (len(Y_test) + 0.0)))
| mit |
zaxliu/deepnap | experiments/kdd-exps/experiment_DynaQNN_Feb13_2358.py | 1 | 5180 | # System built-in modules
import time
from datetime import datetime
import sys
import os
from multiprocessing import Pool
# Project dependency modules
import pandas as pd
pd.set_option('mode.chained_assignment', None) # block warnings due to DataFrame value assignment
import lasagne
# Project modules
sys.path.append('../')
from sleep_control.traffic_emulator import TrafficEmulator
from sleep_control.traffic_server import TrafficServer
from sleep_control.controller import QController, DummyController, NController
from sleep_control.integration import Emulation
from sleep_control.env_models import SJTUModel
from rl.qtable import QAgent
from rl.qnn_theano import QAgentNN
from rl.mixin import PhiMixin, DynaMixin
sys_stdout = sys.stdout
log_prefix = '_'.join(['msg'] + os.path.basename(__file__).replace('.', '_').split('_')[1:4])
log_file_name = "{}_{}.log".format(log_prefix, sys.argv[1])
# Composite classes
class Dyna_QAgentNN(DynaMixin, QAgentNN):
def __init__(self, **kwargs):
super(Dyna_QAgentNN, self).__init__(**kwargs)
# Parameters
# |- Data
location = 'mdB'
# |- Agent
# |- QAgent
actions = [(True, None), (False, 'serve_all')]
gamma, alpha = 0.9, 0.9 # TD backup
explore_strategy, epsilon = 'epsilon', 0.02 # exploration
# |- QAgentNN
# | - Phi
# phi_length = 5
# dim_state = (1, phi_length, 3+2)
# range_state_slice = [(0, 10), (0, 10), (0, 10), (0, 1), (0, 1)]
# range_state = [[range_state_slice]*phi_length]
# | - No Phi
phi_length = 0
dim_state = (1, 1, 3)
range_state = ((((0, 10), (0, 10), (0, 10)),),)
# | - Other params
momentum, learning_rate = 0.9, 0.01 # SGD
num_buffer, memory_size, batch_size, update_period, freeze_period = 2, 200, 100, 4, 16
reward_scaling, reward_scaling_update, rs_period = 1, 'adaptive', 32 # reward scaling
# |- Env model
model_type, traffic_window_size = 'IPP', 50
stride, n_iter, adjust_offset = 2, 3, 1e-22
eval_period, eval_len = 4, 100
n_belief_bins, max_queue_len = 0, 20
Rs, Rw, Rf, Co, Cw = 1.0, -1.0, -10.0, -5.0, -0.5
traffic_params = (model_type, traffic_window_size,
stride, n_iter, adjust_offset,
eval_period, eval_len,
n_belief_bins)
queue_params = (max_queue_len,)
beta = 0.5 # R = (1-beta)*ServiceReward + beta*Cost
reward_params = (Rs, Rw, Rf, Co, Cw, beta)
# |- DynaQ
num_sim = 5
# |- Env
# |- Time
start_time = pd.to_datetime("2014-10-15 09:40:00")
total_time = pd.Timedelta(days=7)
time_step = pd.Timedelta(seconds=2)
backoff_epochs = num_buffer*memory_size+phi_length
head_datetime = start_time - time_step*backoff_epochs
tail_datetime = head_datetime + total_time
TOTAL_EPOCHS = int(total_time/time_step)
# |- Reward
rewarding = {'serve': Rs, 'wait': Rw, 'fail': Rf}
# load from processed data
session_df =pd.read_csv(
filepath_or_buffer='../data/trace_{}.dat'.format(location),
parse_dates=['startTime_datetime', 'endTime_datetime']
)
te = TrafficEmulator(
session_df=session_df, time_step=time_step,
head_datetime=head_datetime, tail_datetime=tail_datetime,
rewarding=rewarding,
verbose=2)
ts = TrafficServer(cost=(Co, Cw), verbose=2)
env_model = SJTUModel(traffic_params, queue_params, reward_params, 2)
agent = Dyna_QAgentNN(
env_model=env_model, num_sim=num_sim,
dim_state=dim_state, range_state=range_state,
f_build_net = None,
batch_size=batch_size, learning_rate=learning_rate, momentum=momentum,
reward_scaling=reward_scaling, reward_scaling_update=reward_scaling_update, rs_period=rs_period,
update_period=update_period, freeze_period=freeze_period,
memory_size=memory_size, num_buffer=num_buffer,
# Below is QAgent params
actions=actions, alpha=alpha, gamma=gamma,
explore_strategy=explore_strategy, epsilon=epsilon,
verbose=2)
c = QController(agent=agent)
emu = Emulation(te=te, ts=ts, c=c, beta=beta)
# Heavyliftings
t = time.time()
sys.stdout = sys_stdout
log_path = './log/'
if os.path.isfile(log_path+log_file_name):
print "Log file {} already exist. Experiment cancelled.".format(log_file_name)
else:
log_file = open(log_path+log_file_name,"w")
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
while emu.epoch is not None and emu.epoch<TOTAL_EPOCHS:
# log time
print "Epoch {},".format(emu.epoch),
left = emu.te.head_datetime + emu.te.epoch*emu.te.time_step
right = left + emu.te.time_step
print "{} - {}".format(left.strftime("%Y-%m-%d %H:%M:%S"), right.strftime("%Y-%m-%d %H:%M:%S"))
emu.step()
print
if emu.epoch%(0.05*TOTAL_EPOCHS)==0:
sys.stdout = sys_stdout
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
sys.stdout = sys_stdout
log_file.close()
print
print log_file_name,
print '{:.3f} sec,'.format(time.time()-t),
print '{:.3f} min'.format((time.time()-t)/60)
| bsd-3-clause |
cl4rke/scikit-learn | sklearn/cross_decomposition/tests/test_pls.py | 215 | 11427 | import numpy as np
from sklearn.utils.testing import (assert_array_almost_equal,
assert_array_equal, assert_true, assert_raise_message)
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition import pls_
from nose.tools import assert_equal
def test_pls():
d = load_linnerud()
X = d.data
Y = d.target
# 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A)
# ===========================================================
# Compare 2 algo.: nipals vs. svd
# ------------------------------
pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1])
pls_bynipals.fit(X, Y)
pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1])
pls_bysvd.fit(X, Y)
# check equalities of loading (up to the sign of the second column)
assert_array_almost_equal(
pls_bynipals.x_loadings_,
np.multiply(pls_bysvd.x_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different x loadings")
assert_array_almost_equal(
pls_bynipals.y_loadings_,
np.multiply(pls_bysvd.y_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different y loadings")
# Check PLS properties (with n_components=X.shape[1])
# ---------------------------------------------------
plsca = pls_.PLSCanonical(n_components=X.shape[1])
plsca.fit(X, Y)
T = plsca.x_scores_
P = plsca.x_loadings_
Wx = plsca.x_weights_
U = plsca.y_scores_
Q = plsca.y_loadings_
Wy = plsca.y_weights_
def check_ortho(M, err_msg):
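# columns of M are mutually orthogonal iff np.dot(M.T, M) is diagonal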
K = np.dot(M.T, M)
assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(Wx, "x weights are not orthogonal")
check_ortho(Wy, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(T, "x scores are not orthogonal")
check_ortho(U, "y scores are not orthogonal")
# Check X = TP' and Y = UQ' (with (p == q) components)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# center scale X, Y
Xc, Yc, x_mean, y_mean, x_std, y_std =\
pls_._center_scale_xy(X.copy(), Y.copy(), scale=True)
assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'")
assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'")
# Check that rotations on training data lead to scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Xr = plsca.transform(X)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
Xr, Yr = plsca.transform(X, Y)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
assert_array_almost_equal(Yr, plsca.y_scores_,
err_msg="rotation on Y failed")
# "Non regression test" on canonical PLS
# --------------------------------------
# The results were checked against the R-package plspm
pls_ca = pls_.PLSCanonical(n_components=X.shape[1])
pls_ca.fit(X, Y)
x_weights = np.array(
[[-0.61330704, 0.25616119, -0.74715187],
[-0.74697144, 0.11930791, 0.65406368],
[-0.25668686, -0.95924297, -0.11817271]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_rotations = np.array(
[[-0.61330704, 0.41591889, -0.62297525],
[-0.74697144, 0.31388326, 0.77368233],
[-0.25668686, -0.89237972, -0.24121788]])
assert_array_almost_equal(pls_ca.x_rotations_, x_rotations)
y_weights = np.array(
[[+0.58989127, 0.7890047, 0.1717553],
[+0.77134053, -0.61351791, 0.16920272],
[-0.23887670, -0.03267062, 0.97050016]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_rotations = np.array(
[[+0.58989127, 0.7168115, 0.30665872],
[+0.77134053, -0.70791757, 0.19786539],
[-0.23887670, -0.00343595, 0.94162826]])
assert_array_almost_equal(pls_ca.y_rotations_, y_rotations)
# 2) Regression PLS (PLS2): "Non regression test"
# ===============================================
# The results were checked against the R-packages plspm, mixOmics and pls
pls_2 = pls_.PLSRegression(n_components=X.shape[1])
pls_2.fit(X, Y)
x_weights = np.array(
[[-0.61330704, -0.00443647, 0.78983213],
[-0.74697144, -0.32172099, -0.58183269],
[-0.25668686, 0.94682413, -0.19399983]])
assert_array_almost_equal(pls_2.x_weights_, x_weights)
x_loadings = np.array(
[[-0.61470416, -0.24574278, 0.78983213],
[-0.65625755, -0.14396183, -0.58183269],
[-0.51733059, 1.00609417, -0.19399983]])
assert_array_almost_equal(pls_2.x_loadings_, x_loadings)
y_weights = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_weights_, y_weights)
y_loadings = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_loadings_, y_loadings)
# 3) Another non-regression test of Canonical PLS on random dataset
# =================================================================
# The results were checked against the R-package plspm
n = 500
p_noise = 10
q_noise = 5
# 2 latents vars:
np.random.seed(11)
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X = np.concatenate(
(X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
Y = np.concatenate(
(Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
np.random.seed(None)
pls_ca = pls_.PLSCanonical(n_components=3)
pls_ca.fit(X, Y)
x_weights = np.array(
[[0.65803719, 0.19197924, 0.21769083],
[0.7009113, 0.13303969, -0.15376699],
[0.13528197, -0.68636408, 0.13856546],
[0.16854574, -0.66788088, -0.12485304],
[-0.03232333, -0.04189855, 0.40690153],
[0.1148816, -0.09643158, 0.1613305],
[0.04792138, -0.02384992, 0.17175319],
[-0.06781, -0.01666137, -0.18556747],
[-0.00266945, -0.00160224, 0.11893098],
[-0.00849528, -0.07706095, 0.1570547],
[-0.00949471, -0.02964127, 0.34657036],
[-0.03572177, 0.0945091, 0.3414855],
[0.05584937, -0.02028961, -0.57682568],
[0.05744254, -0.01482333, -0.17431274]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_loadings = np.array(
[[0.65649254, 0.1847647, 0.15270699],
[0.67554234, 0.15237508, -0.09182247],
[0.19219925, -0.67750975, 0.08673128],
[0.2133631, -0.67034809, -0.08835483],
[-0.03178912, -0.06668336, 0.43395268],
[0.15684588, -0.13350241, 0.20578984],
[0.03337736, -0.03807306, 0.09871553],
[-0.06199844, 0.01559854, -0.1881785],
[0.00406146, -0.00587025, 0.16413253],
[-0.00374239, -0.05848466, 0.19140336],
[0.00139214, -0.01033161, 0.32239136],
[-0.05292828, 0.0953533, 0.31916881],
[0.04031924, -0.01961045, -0.65174036],
[0.06172484, -0.06597366, -0.1244497]])
assert_array_almost_equal(pls_ca.x_loadings_, x_loadings)
y_weights = np.array(
[[0.66101097, 0.18672553, 0.22826092],
[0.69347861, 0.18463471, -0.23995597],
[0.14462724, -0.66504085, 0.17082434],
[0.22247955, -0.6932605, -0.09832993],
[0.07035859, 0.00714283, 0.67810124],
[0.07765351, -0.0105204, -0.44108074],
[-0.00917056, 0.04322147, 0.10062478],
[-0.01909512, 0.06182718, 0.28830475],
[0.01756709, 0.04797666, 0.32225745]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_loadings = np.array(
[[0.68568625, 0.1674376, 0.0969508],
[0.68782064, 0.20375837, -0.1164448],
[0.11712173, -0.68046903, 0.12001505],
[0.17860457, -0.6798319, -0.05089681],
[0.06265739, -0.0277703, 0.74729584],
[0.0914178, 0.00403751, -0.5135078],
[-0.02196918, -0.01377169, 0.09564505],
[-0.03288952, 0.09039729, 0.31858973],
[0.04287624, 0.05254676, 0.27836841]])
assert_array_almost_equal(pls_ca.y_loadings_, y_loadings)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_weights_, "x weights are not orthogonal")
check_ortho(pls_ca.y_weights_, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_scores_, "x scores are not orthogonal")
check_ortho(pls_ca.y_scores_, "y scores are not orthogonal")
def test_PLSSVD():
# Let's check that the PLSSVD doesn't return all possible components but just
# the specified number
d = load_linnerud()
X = d.data
Y = d.target
n_components = 2
for clf in [pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical]:
pls = clf(n_components=n_components)
pls.fit(X, Y)
assert_equal(n_components, pls.y_scores_.shape[1])
def test_univariate_pls_regression():
# Ensure 1d Y is correctly interpreted
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSRegression()
# Compare 1d to column vector
model1 = clf.fit(X, Y[:, 0]).coef_
model2 = clf.fit(X, Y[:, :1]).coef_
assert_array_almost_equal(model1, model2)
def test_predict_transform_copy():
# check that the "copy" keyword works
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSCanonical()
X_copy = X.copy()
Y_copy = Y.copy()
clf.fit(X, Y)
# check that results are identical with copy
assert_array_almost_equal(clf.predict(X), clf.predict(X.copy(), copy=False))
assert_array_almost_equal(clf.transform(X), clf.transform(X.copy(), copy=False))
# check also if passing Y
assert_array_almost_equal(clf.transform(X, Y),
clf.transform(X.copy(), Y.copy(), copy=False))
# check that copy doesn't destroy
# we do want to check exact equality here
assert_array_equal(X_copy, X)
assert_array_equal(Y_copy, Y)
# also check that mean wasn't zero before (to make sure we didn't touch it)
assert_true(np.all(X.mean(axis=0) != 0))
def test_scale():
d = load_linnerud()
X = d.data
Y = d.target
# causes X[:, -1].std() to be zero
X[:, -1] = 1.0
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.set_params(scale=True)
clf.fit(X, Y)
def test_pls_errors():
d = load_linnerud()
X = d.data
Y = d.target
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.n_components = 4
assert_raise_message(ValueError, "Invalid number of components", clf.fit, X, Y)
| bsd-3-clause |
bibsian/database-development | poplerGUI/ui_logic_session.py | 1 | 3959 | #!/usr/bin/env python
from PyQt4 import QtGui, QtCore
from Views import ui_dialog_session as dsess
from poplerGUI import class_inputhandler as ini
from poplerGUI import class_modelviewpandas as view
class SessionDialog(QtGui.QDialog, dsess.Ui_Dialog):
'''
    Dialog box prompts the user to input
    unique metadata relating to the file that
    will be loaded into the database. This information is passed
    to the main window once it has been verified as correct and
    the raw data file has been uploaded.
    Any input generated from this table gets directed to the user
facade class.
'''
raw_data_model = QtCore.pyqtSignal(object)
webview_url = QtCore.pyqtSignal(object)
def __init__(self, parent=None):
super().__init__(parent)
self.setupUi(self)
# Attributes
self.metaini = None
self.fileini = None
self.verify = None
# User facade composed from main window
self.facade = None
# Signal
self.btnVerifyMeta.clicked.connect(self.meta_handler)
self.btnSelectFile.clicked.connect(self.file_handler)
self.btnCancel.clicked.connect(self.close)
self.btnSaveClose.clicked.connect(self.close)
# Status Message boxes
self.error = QtGui.QErrorMessage()
self.message = QtGui.QMessageBox
def meta_handler(self):
'''
Method to verify the user input regarding the metadata
record (globalid/url) they'll be working from.
        The URL is passed to the main window upon verification. Note:
        the user input is wrapped in an InputHandler class.
'''
entries = {
'globalid': int(self.lnedGlobalId.text().strip()),
'metaurl': self.lnedMetadataUrl.text().strip(),
'lter': self.cboxLTERloc.currentText().strip()
}
self.metaini = ini.InputHandler(
name='metacheck', tablename=None,
lnedentry=entries, verify=self.verify)
self.facade.input_register(self.metaini)
try:
print(self.metaini.lnedentry['metaurl'])
self.facade.meta_verify()
self.webview_url.emit(
self.metaini.lnedentry['metaurl'])
self.message.about(self, 'Status', 'Entries recorded')
except Exception as e:
print(str(e))
self.error.showMessage('Invalid entries: ' + str(e))
raise LookupError('Invalid metadata entries')
def file_handler(self):
'''
Method to pass the user input about the file to load.
        Note this is only tested for CSV files: more tests still have
        to be run to make it more flexible. Note: the user input is
        wrapped in an InputHandler class.
'''
lned = {
'sheet': str(self.lnedExcelSheet.text().strip()),
'delim': str(self.lnedDelimiter.text().strip()),
'tskip': str(self.lnedSkipTop.text().strip()),
'bskip': str(self.lnedSkipBottom.text().strip()),
'header': ''
}
rbtn = {
'csv': self.rbtnCsv.isChecked(),
'xlsx': self.rbtnExcel.isChecked(),
'txt': self.rbtnTxt.isChecked()
}
name = str(QtGui.QFileDialog.getOpenFileName(
self, 'Select File'))
headers = self.ckHeader.isChecked()
self.fileini = ini.InputHandler(
name='fileoptions', tablename=None,
rbtns=rbtn, lnedentry=lned, filename=name,
checks=headers
)
self.facade.input_register(self.fileini)
try:
self.facade.load_data()
self.raw_data_model.emit('loaded_data')
except Exception as e:
raise IOError('Could not load data')
@QtCore.pyqtSlot(object)
def info_updates(self, message):
'''
Method to display message updates from mainwindow
'''
self.message.about(self, 'Status', message)
| mit |
janelia-flyem/gala | tests/test_gala.py | 1 | 11479 | import os
import glob
import functools
from contextlib import contextmanager
import pytest
from numpy.testing import assert_allclose
import numpy as np
from scipy import ndimage as ndi
from sklearn.linear_model import LogisticRegression
import subprocess as sp
from gala import imio, features, agglo, evaluate as ev
LR = functools.partial(LogisticRegression, solver='liblinear')
@contextmanager
def tar_extract(fn):
sp.call(['tar', '-xzf', fn + '.tar.gz'])
ext_fn = os.path.basename(fn)
yield ext_fn
os.remove(ext_fn)
for sub_fn in glob.glob(ext_fn + '_*'):
os.remove(sub_fn)
rundir = os.path.dirname(__file__)
### fixtures
def dummy_data_source():
frag = np.arange(1, 17, dtype=int).reshape((4, 4))
gt = np.array([[1, 1, 2, 2], [1, 1, 2, 2], [3] * 4, [3] * 4], dtype=int)
fman = features.base.Mock(frag, gt)
g = agglo.Rag(frag, feature_manager=fman, use_slow=True)
return frag, gt, g, fman
@pytest.fixture
def dummy_data():
return dummy_data_source()
@pytest.fixture
def dummy_data_fast(dummy_data):
frag, gt, _, fman = dummy_data
frag = ndi.zoom(frag, 2, order=0)
gt = ndi.zoom(gt, 2, order=0)
g = agglo.Rag(frag, feature_manager=fman)
return frag, gt, g, fman
### tests
def test_generate_flat_learning_edges(dummy_data):
"""Run a flat epoch and ensure all edges are correctly represented."""
frag, gt, g, fman = dummy_data
feat, target, weights, edges = g.learn_flat(gt, fman)
assert feat.shape == (24, 2)
assert tuple(edges[0]) == (1, 2)
assert tuple(edges[-1]) == (15, 16)
assert np.sum(target[:, 0] == 1) == 6 # number of non-merge edges
def test_generate_flat_learning_edges_fast(dummy_data_fast):
"""Run a flat epoch and ensure all edges are correctly represented."""
frag, gt, g, fman = dummy_data_fast
feat, target, weights, edges = g.learn_flat(gt, fman)
assert feat.shape == (24, 2)
assert tuple(edges[0]) == (1, 2)
assert tuple(edges[-1]) == (15, 16)
assert np.sum(target[:, 0] == 1) == 6 # number of non-merge edges
def test_generate_lash_examples(dummy_data):
"""Run a flat epoch and an active epoch of learning, compare learned sets.
The mock feature manager places all merge examples at (0, 0) in feature
space, and all non-merge examples at (1, 0), *in flat learning*. During
agglomeration, non-merge examples go to (0, 1), which confuses the flat
classifier (which has only learned the difference along the first feature
dimension).
This test checks for those differences in learning using a simple
logistic regression.
"""
frag, gt, g, fman = dummy_data
np.random.seed(99)
summary, allepochs = g.learn_agglomerate(gt, fman,
learning_mode='permissive',
classifier='logistic regression',
min_num_epochs=5)
feat, target, weights, edges = summary
ffeat, ftarget, fweights, fedges = allepochs[0] # flat
lr = LR().fit(feat, target[:, 0])
flr = LR().fit(ffeat, ftarget[:, 0])
def pred(v):
return lr.predict_proba([v])[0, 1]
def fpred(v):
return flr.predict_proba([v])[0, 1]
assert len(allepochs[1][0]) == 15 # number of merges is |nodes| - 1
# approx. same learning results at (0., 0.) and (1., 0.)
print([(fpred(i), pred(i)) for i in [[0, 0], [1, 0], [0, 1]]])
assert_allclose(fpred([0, 0]), 0.2, atol=0.1)
assert_allclose(pred([0, 0]), 0.2, atol=0.1)
assert_allclose(fpred([1, 0]), 0.65, atol=0.1)
assert_allclose(pred([1, 0]), 0.65, atol=0.1)
# difference between agglomerative and flat learning in point (0., 1.)
assert_allclose(fpred([0, 1]), 0.2, atol=0.1)
assert_allclose(pred([0, 1]), 0.6, atol=0.1)
def test_generate_lash_examples_fast(dummy_data_fast):
"""Run a flat epoch and an active epoch of learning, compare learned sets.
The mock feature manager places all merge examples at (0, 0) in feature
space, and all non-merge examples at (1, 0), *in flat learning*. During
agglomeration, non-merge examples go to (0, 1), which confuses the flat
classifier (which has only learned the difference along the first feature
dimension).
This test checks for those differences in learning using a simple
logistic regression.
"""
frag, gt, g, fman = dummy_data_fast
np.random.seed(99)
summary, allepochs = g.learn_agglomerate(gt, fman,
learning_mode='permissive',
classifier='logistic regression',
min_num_epochs=5)
feat, target, weights, edges = summary
ffeat, ftarget, fweights, fedges = allepochs[0] # flat
lr = LR().fit(feat, target[:, 0])
flr = LR().fit(ffeat, ftarget[:, 0])
def pred(v):
return lr.predict_proba([v])[0, 1]
def fpred(v):
return flr.predict_proba([v])[0, 1]
assert len(allepochs[1][0]) == 15 # number of merges is |nodes| - 1
# approx. same learning results at (0., 0.) and (1., 0.)
print([(fpred(i), pred(i)) for i in [[0, 0], [1, 0], [0, 1]]])
assert_allclose(fpred([0, 0]), 0.2, atol=0.2)
assert_allclose(pred([0, 0]), 0.2, atol=0.2)
assert_allclose(fpred([1, 0]), 0.65, atol=0.15)
assert_allclose(pred([1, 0]), 0.65, atol=0.15)
# difference between agglomerative and flat learning in point (0., 1.)
assert_allclose(fpred([0, 1]), 0.2, atol=0.2) # < 0.4
assert_allclose(pred([0, 1]), 0.65, atol=0.2) # > 0.45
def test_generate_gala_examples(dummy_data):
"""As `test_generate_lash_examples`, but using strict learning. """
frag, gt, g, fman = dummy_data
np.random.seed(99)
summary, allepochs = g.learn_agglomerate(gt, fman,
learning_mode='strict',
classifier='logistic regression',
min_num_epochs=5)
feat, target, weights, edges = summary
ffeat, ftarget, fweights, fedges = allepochs[0] # flat
lr = LR().fit(feat, target[:, 0])
flr = LR().fit(ffeat, ftarget[:, 0])
def pred(v):
return lr.predict_proba([v])[0, 1]
def fpred(v):
return flr.predict_proba([v])[0, 1]
assert len(allepochs[1][0]) > 15 # number of merges is more than LASH
# approx. same learning results at (0., 0.) and (1., 0.)
assert_allclose(fpred([0, 0]), 0.2, atol=0.1)
assert_allclose(pred([0, 0]), 0.2, atol=0.1)
assert_allclose(fpred([1, 0]), 0.64, atol=0.1)
assert_allclose(pred([1, 0]), 0.64, atol=0.1)
# difference between agglomerative and flat learning in point (0., 1.);
# greater separation than with LASH
assert_allclose(fpred([0, 1]), 0.2, atol=0.1)
assert_allclose(pred([0, 1]), 0.7, atol=0.1)
def test_generate_gala_examples_fast_updateedges(dummy_data_fast):
"""As `test_generate_lash_examples`, but using strict learning. """
frag, gt, g, fman = dummy_data_fast
g = agglo.Rag(frag, feature_manager=fman, update_unchanged_edges=True)
np.random.seed(99)
summary, allepochs = g.learn_agglomerate(gt, fman,
learning_mode='strict',
classifier='logistic regression')
feat, target, weights, edges = summary
ffeat, ftarget, fweights, fedges = allepochs[0] # flat
lr = LR().fit(feat, target[:, 0])
flr = LR().fit(ffeat, ftarget[:, 0])
def pred(v):
return lr.predict_proba([v])[0, 1]
def fpred(v):
return flr.predict_proba([v])[0, 1]
assert len(allepochs[1][0]) > 15 # number of merges is more than LASH
# approx. same learning results at (0., 0.) and (1., 0.)
assert_allclose(fpred([0, 0]), 0.2, atol=0.2)
assert_allclose(pred([0, 0]), 0.2, atol=0.2)
assert_allclose(fpred([1, 0]), 0.65, atol=0.15)
assert_allclose(pred([1, 0]), 0.65, atol=0.15)
# difference between agglomerative and flat learning in point (0., 1.);
# greater separation than with LASH
assert_allclose(fpred([0, 1]), 0.2, atol=0.15)
assert_allclose(pred([0, 1]), 0.7, atol=0.15)
def test_generate_gala_examples_fast(dummy_data_fast):
"""As `test_generate_lash_examples`, but using strict learning. """
frag, gt, g, fman = dummy_data_fast
np.random.seed(99)
summary, allepochs = g.learn_agglomerate(gt, fman,
learning_mode='strict',
classifier='logistic regression',
min_num_epochs=5)
feat, target, weights, edges = summary
ffeat, ftarget, fweights, fedges = allepochs[0] # flat
lr = LR().fit(feat, target[:, 0])
flr = LR().fit(ffeat, ftarget[:, 0])
def pred(v):
return lr.predict_proba([v])[0, 1]
def fpred(v):
return flr.predict_proba([v])[0, 1]
assert len(allepochs[1][0]) > 15 # number of merges is more than LASH
# approx. same learning results at (0., 0.) and (1., 0.)
assert_allclose(fpred([0, 0]), 0.2, atol=0.2)
assert_allclose(pred([0, 0]), 0.2, atol=0.2)
assert_allclose(fpred([1, 0]), 0.65, atol=0.15)
assert_allclose(pred([1, 0]), 0.65, atol=0.15)
# difference between agglomerative and flat learning in point (0., 1.);
# greater separation than with LASH
assert_allclose(fpred([0, 1]), 0.2, atol=0.15)
assert_allclose(pred([0, 1]), 0.7, atol=0.15)
def test_segment_with_gala_classifer(dummy_data_fast):
frag, gt, g, fman = dummy_data_fast
np.random.seed(5)
summary, allepochs = g.learn_agglomerate(gt, fman,
learning_mode='strict',
classifier='logistic regression',
min_num_epochs=5)
feat, target, weights, edges = summary
ffeat, ftarget, fweights, fedges = allepochs[0] # flat
lr = LR().fit(feat, target[:, 0])
gala_policy = agglo.classifier_probability(fman, lr)
flr = LR().fit(ffeat, ftarget[:, 0])
flat_policy = agglo.classifier_probability(fman, flr)
gtest = agglo.Rag(frag, feature_manager=fman,
merge_priority_function=gala_policy)
gtest.agglomerate(0.5)
assert ev.vi(gtest.get_segmentation(), gt) == 0
gtest_flat = agglo.Rag(frag, feature_manager=fman,
merge_priority_function=flat_policy)
assert ev.vi(gtest_flat.get_segmentation(0.5), gt) == 1.5
def test_split_vi():
ws_test = imio.read_h5_stack(
os.path.join(rundir, 'example-data/test-ws.lzf.h5'))
gt_test = imio.read_h5_stack(
os.path.join(rundir, 'example-data/test-gt.lzf.h5'))
seg_test1 = imio.read_h5_stack(
os.path.join(rundir, 'example-data/test-seg1.lzf.h5'))
seg_test4 = imio.read_h5_stack(
os.path.join(rundir, 'example-data/test-seg4.lzf.h5'))
result = np.vstack((
ev.split_vi(ws_test, gt_test),
ev.split_vi(seg_test1, gt_test),
ev.split_vi(seg_test4, gt_test)
))
expected = np.load(os.path.join(rundir, 'example-data/vi-results.npy'))
assert_allclose(result, expected, atol=1e-6)
if __name__ == '__main__':
np.random.RandomState(0)
from numpy import testing
testing.run_module_suite()
| bsd-3-clause |
jakejhansen/minesweeper_solver | policy_gradients/test_condensed_v4.py | 1 | 8720 | # review solution
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.nn import relu, softmax
import gym
import sys
import os
sys.path.append('../')
from minesweeper_tk import Minesweeper
model = "condensed_6x6_v4"
# training settings
epochs = 100000 # number of training batches
batch_size = 200 # number of timesteps in a batch
rollout_limit = 50 # max rollout length
discount_factor = 0 # reward discount factor (gamma), 1.0 = no discount
learning_rate = 0.000002 # you know this by now #0.001,
#4400: 56% win --> LR: 0.0003
#5600: 69% win --> LR: 0.0001
#7200: 74% win --> LR: 0.00003
#8400: 77% win --> LR: 0.00001
#9600: 75% win --> LR: 0.000005
#10400: 75% win --> LR: 0.000002
early_stop_loss = 0 # stop training if loss < early_stop_loss, 0 or False to disable
""" condensed
epochs = 100000 # number of training batches
batch_size = 400 # number of timesteps in a batch
rollout_limit = 50 # max rollout length
discount_factor = 0 # reward discount factor (gamma), 1.0 = no discount
learning_rate = 0.00004 # you know this by now #0.0005
early_stop_loss = 0 # stop training if loss < early_stop_loss, 0 or False to disable
"""
""" 261 epocs to learn 2 specific board (overfit)
epochs = 10000 # number of training batches
batch_size = 200 # number of timesteps in a batch
rollout_limit = 130 # max rollout length
discount_factor = 0 # reward discount factor (gamma), 1.0 = no discount
learning_rate = 0.001 # you know this by now
early_stop_loss = 0 # stop training if loss < early_stop_loss, 0 or False to disable
"""
# setup policy network
n = 6
n_inputs = 6*6*2
n_hidden = 6*6*8
n_hidden2 = 220
n_hidden3 = 220
n_hidden4 = 220
n_outputs = 6*6
dropout = 0
tf.reset_default_graph()
states_pl = tf.placeholder(tf.float32, [None, n_inputs], name='states_pl')
actions_pl = tf.placeholder(tf.int32, [None, 2], name='actions_pl')
advantages_pl = tf.placeholder(tf.float32, [None], name='advantages_pl')
learning_rate_pl = tf.placeholder(tf.float32, name='learning_rate_pl')
input_layer = tf.reshape(states_pl, [-1, n, n, 2])
conv1 = tf.layers.conv2d(inputs=input_layer,filters=18,kernel_size=[5, 5],padding="same", activation=tf.nn.relu)
conv2 = tf.layers.conv2d(inputs=conv1,filters=36,kernel_size=[3, 3],padding="same", activation=tf.nn.relu)
conv2_flat = tf.contrib.layers.flatten(conv2)
l_hidden = tf.layers.dense(inputs=conv2_flat, units=n_hidden, activation=relu, name='l_hidden')
l_hidden2 = tf.layers.dense(inputs=l_hidden, units=n_hidden2, activation=relu, name='l_hidden2')
l_hidden2 = tf.layers.dropout(l_hidden2, rate=dropout)
l_hidden3 = tf.layers.dense(inputs=l_hidden2, units=n_hidden3, activation=relu, name='l_hidden3')
l_hidden3 = tf.layers.dropout(l_hidden3, rate=dropout)
#l_hidden4 = tf.layers.dense(inputs=l_hidden3, units=n_hidden4, activation=relu, name='l_hidden4')
l_hidden3 = tf.layers.dropout(l_hidden3, rate=dropout)
l_out = tf.layers.dense(inputs=l_hidden3, units=n_outputs, activation=softmax, name='l_out')
# print network
print('states_pl:', states_pl.get_shape())
print('actions_pl:', actions_pl.get_shape())
print('advantages_pl:', advantages_pl.get_shape())
print('l_hidden:', l_hidden.get_shape())
print('l_hidden2:', l_hidden2.get_shape())
print('l_hidden3:', l_hidden3.get_shape())
print('l_out:', l_out.get_shape())
# define loss and optimizer
loss_f = -tf.reduce_mean(tf.multiply(tf.log(tf.gather_nd(l_out, actions_pl)), advantages_pl))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate_pl, beta1=0.8, beta2=0.92)
train_f = optimizer.minimize(loss_f)
saver = tf.train.Saver() # we use this later to save the model
# test forward pass
from minesweeper_tk import Minesweeper
env = Minesweeper(display=False, ROWS = 6, COLS = 6, MINES = 7, OUT = "CONDENSED", rewards = {"win" : 1, "loss" : -1, "progress" : 0.9, "noprogress" : -0.3, "YOLO" : -0.3})
state = env.stateConverter(env.get_state()).flatten()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
action_probabilities = sess.run(fetches=l_out, feed_dict={states_pl: [state]})
print(action_probabilities)
# helper functions
def get_rollout(sess, env, rollout_limit=None, stochastic=False, seed=None):
"""Generate rollout by iteratively evaluating the current policy on the environment."""
rollout_limit = rollout_limit
env.reset()
s = env.stateConverter(env.get_state()).flatten()
states, actions, rewards = [], [], []
for i in range(rollout_limit):
a = get_action(sess, s, stochastic)
s1, r, done, _ = env.step(a)
states.append(s)
actions.append(a)
rewards.append(r)
s = s1
if done: break
return states, actions, rewards, i+1
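# Hedged usage sketch (not part of the original script): get_rollout assumes
# an open tf.Session whose policy variables have been initialized or restored,
# as in the blocks above/below. Left commented out so it does not run on import.
#
#     with tf.Session() as sess:
#         sess.run(tf.global_variables_initializer())
#         states, actions, rewards, n_steps = get_rollout(
#             sess, env, rollout_limit=rollout_limit, stochastic=True)
#         print("rollout length:", n_steps, "total reward:", sum(rewards))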
def get_action(sess, state, stochastic=False):
"""Choose an action, given a state, with the current policy network."""
# get action probabilities
a_prob = sess.run(fetches=l_out, feed_dict={states_pl: np.atleast_2d(state)})
#valid_moves = env.get_validMoves()
#a_prob[~valid_moves.flatten().reshape(1,36)] = 0
#a_prob[a_prob < 0.00001] = 0.000001
#a_prob / np.sum(a_prob)
#a_prob = normalize(a_prob, norm = 'l1')
#if abs(1-np.sum(a_prob)) > 0.01:
# a_prob = sess.run(fetches=l_out, feed_dict={states_pl: np.atleast_2d(state)})
if stochastic:
# sample action from distribution
return (np.cumsum(np.asarray(a_prob)) > np.random.rand()).argmax()
else:
# select action with highest probability
return a_prob.argmax()
def get_advantages(rewards, rollout_limit, discount_factor, eps=1e-12):
"""Compute advantages"""
returns = get_returns(rewards, rollout_limit, discount_factor)
# standardize columns of returns to get advantages
advantages = (returns - np.mean(returns, axis=0)) / (np.std(returns, axis=0) + eps)
# restore original rollout lengths
advantages = [adv[:len(rewards[i])] for i, adv in enumerate(advantages)]
return advantages
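# Worked example (sketch, illustrative numbers): two single-step rollouts whose
# returns matrix is [[2.], [0.]] have per-step mean 1 and std 1, so
# get_advantages yields approximately [[1.], [-1.]] (up to the eps term in the
# denominator) before the original rollout lengths are restored.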
def get_returns(rewards, rollout_limit, discount_factor):
"""Compute the cumulative discounted rewards, a.k.a. returns."""
returns = np.zeros((len(rewards), rollout_limit))
for i, r in enumerate(rewards):
returns[i, len(r) - 1] = r[-1]
for j in reversed(range(len(r)-1)):
returns[i,j] = r[j] + discount_factor * returns[i,j+1]
return returns
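# Worked example (sketch, values chosen for illustration): with
# discount_factor = 0.5, rollout_limit = 3 and a single rollout
# rewards = [[1, 0, 2]], the backward recursion above yields
# returns = [[1 + 0.5 * (0 + 0.5 * 2), 0 + 0.5 * 2, 2]] = [[1.5, 1.0, 2.0]].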
import time
if __name__ == "__main__":
import time
display = False
env = Minesweeper(display=display, ROWS = 6, COLS = 6, MINES = 6, OUT = "CONDENSED", rewards = {"win" : 1, "loss" : -1, "progress" : 0.1, "noprogress" : -0.3, "YOLO": -0.3})
i = 0
#start = time.time()
with tf.Session() as sess:
saver = tf.train.Saver()
saver.restore(sess, "{}/{}_best.ckpt".format(model,model))
#view = Viewer(env, custom_render=True)
games = 0
moves = 0
stuck = 0
won_games = 0
lost_games = 0
while games < 10000:
if games % 500 == 0:
print(games)
r = 1
r_prev = 1
while True:
if display:
input()
s = env.stateConverter(env.get_state()).flatten()
if r < 0:
a = get_action(sess, s, stochastic=True)
else:
a = get_action(sess, s, stochastic=False)
moves += 1
r_prev = r
s, r, done, _ = env.step(a)
s = s.flatten()
if display:
print("Reward = ", r)
#print("\nReward = {}".format(r))
if r == 1:
won_games += 1
if r == -1:
lost_games += 1
if done:
games += 1
env.reset()
moves = 0
break
elif moves >= 30:
stuck += 1
games += 1
env.lost = env.lost + 1
env.reset()
moves = 0
break
#print(env.lost)
#print(env.won)
print("games: {}, won: {}, lost: {}, stuck: {}, win_rate : {:.1f}%".format(games, won_games, lost_games, stuck, won_games/games * 100))
#view.render(close=True, display_gif=True)
#69% win-rate for 54000 epochs
| mit |
darothen/pyrcel | pyrcel/postprocess.py | 2 | 2032 | """ Collection of output post-processing routines.
"""
import numpy as np
import pandas as pd
from .activation import binned_activation
def simulation_activation(model, parcel_df, aerosols_panel):
""" Given the DataFrame output from a parcel model simulation, compute
activation kinetic limitation diagnostics.
Parameters
----------
model : ParcelModel
The ParcelModel
    parcel_df : DataFrame
        The DataFrame containing the parcel's thermodynamic trajectory,
        used to generate the results to be analyzed
aerosols_panel : Panel
A Panel collection of DataFrames containing the aerosol size evolution
Returns
-------
act_stats : DataFrame
A DataFrame containing the activation statistics
"""
initial_row = parcel_df.iloc[0]
Smax_i, T_i = initial_row["S"], initial_row["T"]
acts = {"eq": [], "kn": [], "alpha": [], "phi": []}
initial_aerosols = model.aerosols
N_all_modes = np.sum([aer.total_N for aer in initial_aerosols])
N_fracs = {
aer.species: aer.total_N / N_all_modes for aer in initial_aerosols
}
for i in range(len(parcel_df)):
row_par = parcel_df.iloc[i]
rows_aer = {key: aerosols_panel[key].iloc[i] for key in aerosols_panel}
# Update thermo
T_i = row_par["T"]
if row_par["S"] > Smax_i:
Smax_i = row_par["S"]
eq_tot, kn_tot, alpha_tot, phi_tot = 0.0, 0.0, 0.0, 0.0
for aerosol in initial_aerosols:
N_frac = N_fracs[aerosol.species]
rs = rows_aer[aerosol.species]
eq, kn, alpha, phi = binned_activation(Smax_i, T_i, rs, aerosol)
eq_tot += eq * N_frac
kn_tot += kn * N_frac
alpha_tot += alpha * N_frac
phi_tot += phi * N_frac
acts["kn"].append(kn_tot)
acts["eq"].append(eq_tot)
acts["alpha"].append(alpha_tot)
acts["phi"].append(phi_tot)
acts_total = pd.DataFrame(acts, index=parcel_df.index)
return acts_total
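# Hedged usage sketch (not part of the original module): assumes a parcel
# model simulation has already been run and returned `parcel_df` and
# `aerosols_panel` in the layout documented above; the constructor arguments
# and run() signature below are placeholders, not the definitive pyrcel API.
#
#     model = ParcelModel(initial_aerosols, V, T0, S0, P0)
#     parcel_df, aerosols_panel = model.run(t_end=500., output_dt=1.)
#     act_stats = simulation_activation(model, parcel_df, aerosols_panel)
#     act_stats[["eq", "kn"]].plot()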
| bsd-3-clause |
nehz/keras | tests/manual/check_callbacks.py | 82 | 7540 | import numpy as np
import random
import theano
from keras.models import Sequential
from keras.callbacks import Callback
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.regularizers import l2
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras.datasets import mnist
import keras.callbacks as cbks
from matplotlib import pyplot as plt
from matplotlib import animation
##############################
# model DrawActivations test #
##############################
print('Running DrawActivations test')
nb_classes = 10
batch_size = 128
nb_epoch = 10
max_train_samples = 512
max_test_samples = 1
np.random.seed(1337)
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(-1,1,28,28)[:max_train_samples]
X_train = X_train.astype("float32")
X_train /= 255
X_test = X_test.reshape(-1,1,28,28)[:max_test_samples]
X_test = X_test.astype("float32")
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)[:max_train_samples]
class Frames(object):
def __init__(self, n_plots=16):
self._n_frames = 0
self._framedata = []
self._titles = []
for i in range(n_plots):
self._framedata.append([])
def add_frame(self, i, frame):
self._framedata[i].append(frame)
def set_title(self, title):
self._titles.append(title)
class SubplotTimedAnimation(animation.TimedAnimation):
def __init__(self, fig, frames, grid=(4, 4), interval=10, blit=False, **kwargs):
self.n_plots = grid[0] * grid[1]
self.axes = [fig.add_subplot(grid[0], grid[1], i + 1) for i in range(self.n_plots)]
for axis in self.axes:
axis.get_xaxis().set_ticks([])
axis.get_yaxis().set_ticks([])
self.frames = frames
self.imgs = [self.axes[i].imshow(frames._framedata[i][0], interpolation='nearest', cmap='bone') for i in range(self.n_plots)]
self.title = fig.suptitle('')
super(SubplotTimedAnimation, self).__init__(fig, interval=interval, blit=blit, **kwargs)
def _draw_frame(self, j):
for i in range(self.n_plots):
self.imgs[i].set_data(self.frames._framedata[i][j])
if len(self.frames._titles) > j:
self.title.set_text(self.frames._titles[j])
self._drawn_artists = self.imgs
def new_frame_seq(self):
return iter(range(len(self.frames._framedata[0])))
def _init_draw(self):
for img in self.imgs:
img.set_data([[]])
def combine_imgs(imgs, grid=(1,1)):
n_imgs, img_h, img_w = imgs.shape
if n_imgs != grid[0] * grid[1]:
        raise ValueError("number of images does not match the grid size")
combined = np.zeros((grid[0] * img_h, grid[1] * img_w))
for i in range(grid[0]):
for j in range(grid[1]):
combined[img_h*i:img_h*(i+1),img_w*j:img_w*(j+1)] = imgs[grid[0] * i + j]
return combined
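# Worked example (sketch): four 2x2 images stacked in an array of shape
# (4, 2, 2), combined with grid=(2, 2), produce a single (4, 4) array in which
# image k is placed at block row k // 2 and block column k % 2.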
class DrawActivations(Callback):
def __init__(self, figsize):
self.fig = plt.figure(figsize=figsize)
def on_train_begin(self, logs={}):
self.imgs = Frames(n_plots=5)
layers_0_ids = np.random.choice(32, 16, replace=False)
self.test_layer0 = theano.function([self.model.get_input()], self.model.layers[1].get_output(train=False)[0, layers_0_ids])
layers_1_ids = np.random.choice(64, 36, replace=False)
self.test_layer1 = theano.function([self.model.get_input()], self.model.layers[5].get_output(train=False)[0, layers_1_ids])
self.test_layer2 = theano.function([self.model.get_input()], self.model.layers[10].get_output(train=False)[0])
def on_epoch_begin(self, epoch, logs={}):
self.epoch = epoch
def on_batch_end(self, batch, logs={}):
if batch % 5 == 0:
self.imgs.add_frame(0, X_test[0,0])
self.imgs.add_frame(1, combine_imgs(self.test_layer0(X_test), grid=(4, 4)))
self.imgs.add_frame(2, combine_imgs(self.test_layer1(X_test), grid=(6, 6)))
self.imgs.add_frame(3, self.test_layer2(X_test).reshape((16,16)))
self.imgs.add_frame(4, self.model._predict(X_test)[0].reshape((1,10)))
self.imgs.set_title('Epoch #%d - Batch #%d' % (self.epoch, batch))
def on_train_end(self, logs={}):
anim = SubplotTimedAnimation(self.fig, self.imgs, grid=(1,5), interval=10, blit=False, repeat_delay=1000)
# anim.save('test_gif.gif', fps=15, writer='imagemagick')
plt.show()
# model = Sequential()
# model.add(Dense(784, 50))
# model.add(Activation('relu'))
# model.add(Dense(50, 10))
# model.add(Activation('softmax'))
model = Sequential()
model.add(Convolution2D(32, 1, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(64, 32, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(64*8*8, 256))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(256, 10, W_regularizer = l2(0.1)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# Fit the model
draw_weights = DrawActivations(figsize=(5.4, 1.35))
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, callbacks=[draw_weights])
##########################
# model checkpoint tests #
##########################
print('Running ModelCheckpoint test')
nb_classes = 10
batch_size = 128
nb_epoch = 20
# small sample size to overfit on training data
max_train_samples = 50
max_test_samples = 1000
np.random.seed(1337) # for reproducibility
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000,784)[:max_train_samples]
X_test = X_test.reshape(10000,784)[:max_test_samples]
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
X_train /= 255
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)[:max_train_samples]
Y_test = np_utils.to_categorical(y_test, nb_classes)[:max_test_samples]
# Create a slightly larger network than required to test best validation save only
model = Sequential()
model.add(Dense(784, 500))
model.add(Activation('relu'))
model.add(Dense(500, 10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# test file location
path = "/tmp"
filename = "model_weights.hdf5"
import os
f = os.path.join(path, filename)
print("Test model checkpointer")
# only store best validation model in checkpointer
checkpointer = cbks.ModelCheckpoint(filepath=f, verbose=1, save_best_only=True)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=(X_test, Y_test), callbacks =[checkpointer])
if not os.path.isfile(f):
raise Exception("Model weights were not saved to %s" % (f))
print("Test model checkpointer without validation data")
import warnings
warnings.filterwarnings('error')
try:
# this should issue a warning
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, callbacks =[checkpointer])
except:
print("Tests passed")
import sys
sys.exit(0)
raise Exception("Modelcheckpoint tests did not pass")
| mit |
WarrenWeckesser/scikits-image | doc/ext/plot_directive.py | 89 | 20530 | """
A special directive for generating a matplotlib plot.
.. warning::
This is a hacked version of plot_directive.py from Matplotlib.
It's very much subject to change!
Usage
-----
Can be used like this::
.. plot:: examples/example.py
.. plot::
import matplotlib.pyplot as plt
plt.plot([1,2,3], [4,5,6])
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
The content is interpreted as doctest formatted if it has a line starting
with ``>>>``.
The ``plot`` directive supports the options
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. Default can be changed in conf.py
and the ``image`` directive options ``alt``, ``height``, ``width``,
``scale``, ``align``, ``class``.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_pre_code
Code that should be executed before each plot.
plot_basedir
    Base directory, to which plot:: file names are relative.
    (If None or empty, file names are relative to the directory where
    the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen.
plot_html_show_formats
Whether to show links to the files in HTML.
TODO
----
* Refactor Latex output; now it's plain images, but it would be nice
to make them appear side-by-side, or in floats.
"""
from __future__ import division, absolute_import, print_function
import sys, os, glob, shutil, imp, warnings, re, textwrap, traceback
import sphinx
if sys.version_info[0] >= 3:
from io import StringIO
else:
    from cStringIO import StringIO
import warnings
warnings.warn("A plot_directive module is also available under "
"matplotlib.sphinxext; expect this numpydoc.plot_directive "
"module to be deprecated after relevant features have been "
"integrated there.",
FutureWarning, stacklevel=2)
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def setup(app):
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
app.add_config_value('plot_pre_code', '', True)
app.add_config_value('plot_include_source', False, True)
app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
app.add_config_value('plot_basedir', None, True)
app.add_config_value('plot_html_show_formats', True, True)
app.add_directive('plot', plot_directive, True, (0, 1, False),
**plot_directive_options)
#------------------------------------------------------------------------------
# plot:: directive
#------------------------------------------------------------------------------
from docutils.parsers.rst import directives
from docutils import nodes
def plot_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return run(arguments, content, options, state_machine, state, lineno)
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
# no argument given, assume used as a flag
return True
elif arg.strip().lower() in ('no', '0', 'false'):
return False
elif arg.strip().lower() in ('yes', '1', 'true'):
return True
else:
raise ValueError('"%s" unknown boolean' % arg)
def _option_format(arg):
    return directives.choice(arg, ('python', 'doctest'))
def _option_align(arg):
return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
"right"))
plot_directive_options = {'alt': directives.unchanged,
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'scale': directives.nonnegative_int,
'align': _option_align,
'class': directives.class_option,
'include-source': _option_boolean,
'format': _option_format,
}
#------------------------------------------------------------------------------
# Generating output
#------------------------------------------------------------------------------
from docutils import nodes, utils
try:
# Sphinx depends on either Jinja or Jinja2
import jinja2
def format_template(template, **kw):
return jinja2.Template(template).render(**kw)
except ImportError:
import jinja
def format_template(template, **kw):
return jinja.from_string(template, **kw)
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.png
{%- for option in options %}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{% endfor %}
{{ only_latex }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.pdf
{% endfor %}
"""
class ImageFile(object):
def __init__(self, basename, dirname):
self.basename = basename
self.dirname = dirname
self.formats = []
def filename(self, format):
return os.path.join(self.dirname, "%s.%s" % (self.basename, format))
def filenames(self):
return [self.filename(fmt) for fmt in self.formats]
def run(arguments, content, options, state_machine, state, lineno):
if arguments and content:
raise RuntimeError("plot:: directive can't have both args and content")
document = state_machine.document
config = document.settings.env.config
options.setdefault('include-source', config.plot_include_source)
# determine input
rst_file = document.attributes['source']
rst_dir = os.path.dirname(rst_file)
if arguments:
if not config.plot_basedir:
source_file_name = os.path.join(rst_dir,
directives.uri(arguments[0]))
else:
source_file_name = os.path.join(setup.confdir, config.plot_basedir,
directives.uri(arguments[0]))
code = open(source_file_name, 'r').read()
output_base = os.path.basename(source_file_name)
else:
source_file_name = rst_file
code = textwrap.dedent("\n".join(map(str, content)))
counter = document.attributes.get('_plot_counter', 0) + 1
document.attributes['_plot_counter'] = counter
base, ext = os.path.splitext(os.path.basename(source_file_name))
output_base = '%s-%d.py' % (base, counter)
base, source_ext = os.path.splitext(output_base)
if source_ext in ('.py', '.rst', '.txt'):
output_base = base
else:
source_ext = ''
# ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
output_base = output_base.replace('.', '-')
# is it in doctest format?
is_doctest = contains_doctest(code)
if 'format' in options:
if options['format'] == 'python':
is_doctest = False
else:
is_doctest = True
# determine output directory name fragment
source_rel_name = relpath(source_file_name, setup.confdir)
source_rel_dir = os.path.dirname(source_rel_name)
while source_rel_dir.startswith(os.path.sep):
source_rel_dir = source_rel_dir[1:]
# build_dir: where to place output files (temporarily)
build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
'plot_directive',
source_rel_dir)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
# output_dir: final location in the builder's directory
dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
source_rel_dir))
# how to link to files from the RST file
dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
source_rel_dir).replace(os.path.sep, '/')
build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
source_link = dest_dir_link + '/' + output_base + source_ext
# make figures
try:
results = makefig(code, source_file_name, build_dir, output_base,
config)
errors = []
except PlotError as err:
reporter = state.memo.reporter
sm = reporter.system_message(
2, "Exception occurred in plotting %s: %s" % (output_base, err),
line=lineno)
results = [(code, [])]
errors = [sm]
# generate output restructuredtext
total_lines = []
for j, (code_piece, images) in enumerate(results):
if options['include-source']:
if is_doctest:
lines = ['']
lines += [row.rstrip() for row in code_piece.split('\n')]
else:
lines = ['.. code-block:: python', '']
lines += [' %s' % row.rstrip()
for row in code_piece.split('\n')]
source_code = "\n".join(lines)
else:
source_code = ""
opts = [':%s: %s' % (key, val) for key, val in list(options.items())
if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
only_html = ".. only:: html"
only_latex = ".. only:: latex"
if j == 0:
src_link = source_link
else:
src_link = None
result = format_template(
TEMPLATE,
dest_dir=dest_dir_link,
build_dir=build_dir_link,
source_link=src_link,
multi_image=len(images) > 1,
only_html=only_html,
only_latex=only_latex,
options=opts,
images=images,
source_code=source_code,
html_show_formats=config.plot_html_show_formats)
total_lines.extend(result.split("\n"))
total_lines.extend("\n")
if total_lines:
state_machine.insert_input(total_lines, source=source_file_name)
# copy image files to builder's output directory
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
for code_piece, images in results:
for img in images:
for fn in img.filenames():
shutil.copyfile(fn, os.path.join(dest_dir,
os.path.basename(fn)))
# copy script (if necessary)
if source_file_name == rst_file:
target_name = os.path.join(dest_dir, output_base + source_ext)
f = open(target_name, 'w')
f.write(unescape_doctest(code))
f.close()
return errors
#------------------------------------------------------------------------------
# Run code and capture figures
#------------------------------------------------------------------------------
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.image as image
from matplotlib import _pylab_helpers
import exceptions
def contains_doctest(text):
try:
# check if it's valid Python as-is
compile(text, '<string>', 'exec')
return False
except SyntaxError:
pass
r = re.compile(r'^\s*>>>', re.M)
m = r.search(text)
return bool(m)
def unescape_doctest(text):
"""
Extract code from a piece of text, which contains either Python code
or doctests.
"""
if not contains_doctest(text):
return text
code = ""
for line in text.split("\n"):
m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
if m:
code += m.group(2) + "\n"
elif line.strip():
code += "# " + line.strip() + "\n"
else:
code += "\n"
return code
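# Illustrative sketch of the transformation above: a doctest-formatted block
#
#     A plotting example:
#     >>> import matplotlib.pyplot as plt
#     >>> plt.plot([1, 2, 3], [4, 5, 6])
#
# is rewritten by unescape_doctest to plain Python, with narrative lines kept
# as comments:
#
#     # A plotting example:
#     import matplotlib.pyplot as plt
#     plt.plot([1, 2, 3], [4, 5, 6])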
def split_code_at_show(text):
"""
Split code at plt.show()
"""
parts = []
is_doctest = contains_doctest(text)
part = []
for line in text.split("\n"):
if (not is_doctest and line.strip() == 'plt.show()') or \
(is_doctest and line.strip() == '>>> plt.show()'):
part.append(line)
parts.append("\n".join(part))
part = []
else:
part.append(line)
if "\n".join(part).strip():
parts.append("\n".join(part))
return parts
class PlotError(RuntimeError):
pass
def run_code(code, code_path, ns=None):
# Change the working directory to the directory of the example, so
# it can get at its data files, if any.
pwd = os.getcwd()
old_sys_path = list(sys.path)
if code_path is not None:
dirname = os.path.abspath(os.path.dirname(code_path))
os.chdir(dirname)
sys.path.insert(0, dirname)
# Redirect stdout
stdout = sys.stdout
sys.stdout = StringIO()
# Reset sys.argv
old_sys_argv = sys.argv
sys.argv = [code_path]
try:
try:
code = unescape_doctest(code)
if ns is None:
ns = {}
if not ns:
exec(setup.config.plot_pre_code, ns)
exec(code, ns)
except (Exception, SystemExit) as err:
raise PlotError(traceback.format_exc())
finally:
os.chdir(pwd)
sys.argv = old_sys_argv
sys.path[:] = old_sys_path
sys.stdout = stdout
return ns
#------------------------------------------------------------------------------
# Generating figures
#------------------------------------------------------------------------------
def out_of_date(original, derived):
"""
Returns True if derivative is out-of-date wrt original,
both of which are full file paths.
"""
return (not os.path.exists(derived)
or os.stat(derived).st_mtime < os.stat(original).st_mtime)
def makefig(code, code_path, output_dir, output_base, config):
"""
Run a pyplot script *code* and save the images under *output_dir*
with file names derived from *output_base*
"""
# -- Parse format list
default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50}
formats = []
for fmt in config.plot_formats:
if isinstance(fmt, str):
formats.append((fmt, default_dpi.get(fmt, 80)))
elif type(fmt) in (tuple, list) and len(fmt)==2:
formats.append((str(fmt[0]), int(fmt[1])))
else:
raise PlotError('invalid image format "%r" in plot_formats' % fmt)
# -- Try to determine if all images already exist
code_pieces = split_code_at_show(code)
# Look for single-figure output files first
all_exists = True
img = ImageFile(output_base, output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
if all_exists:
return [(code, [img])]
# Then look for multi-figure output files
results = []
all_exists = True
for i, code_piece in enumerate(code_pieces):
images = []
for j in range(1000):
img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
# assume that if we have one, we have them all
if not all_exists:
all_exists = (j > 0)
break
images.append(img)
if not all_exists:
break
results.append((code_piece, images))
if all_exists:
return results
# -- We didn't find the files, so build them
results = []
ns = {}
for i, code_piece in enumerate(code_pieces):
# Clear between runs
plt.close('all')
# Run code
run_code(code_piece, code_path, ns)
# Collect images
images = []
fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
for j, figman in enumerate(fig_managers):
if len(fig_managers) == 1 and len(code_pieces) == 1:
img = ImageFile(output_base, output_dir)
else:
img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
output_dir)
images.append(img)
for format, dpi in formats:
try:
figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
except exceptions.BaseException as err:
raise PlotError(traceback.format_exc())
img.formats.append(format)
# Results
results.append((code_piece, images))
return results
#------------------------------------------------------------------------------
# Relative pathnames
#------------------------------------------------------------------------------
try:
from os.path import relpath
except ImportError:
# Copied from Python 2.7
if 'posix' in sys.builtin_module_names:
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
from os.path import sep, curdir, join, abspath, commonprefix, \
pardir
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
# Work out how much of the filepath is shared by start and path.
i = len(commonprefix([start_list, path_list]))
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
elif 'nt' in sys.builtin_module_names:
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
from os.path import sep, curdir, join, abspath, commonprefix, \
pardir, splitunc
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
if start_list[0].lower() != path_list[0].lower():
unc_path, rest = splitunc(path)
unc_start, rest = splitunc(start)
if bool(unc_path) ^ bool(unc_start):
raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
% (path, start))
else:
raise ValueError("path is on drive %s, start on drive %s"
% (path_list[0], start_list[0]))
# Work out how much of the filepath is shared by start and path.
for i in range(min(len(start_list), len(path_list))):
if start_list[i].lower() != path_list[i].lower():
break
else:
i += 1
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
else:
raise RuntimeError("Unsupported platform (no relpath available!)")
| bsd-3-clause |
X-DataInitiative/tick | tick/dataset/fetch_url_dataset.py | 2 | 3229 | # License: BSD 3 clause
import os
import tarfile
import numpy as np
import scipy
from sklearn.datasets import load_svmlight_file
from tick.dataset.download_helper import download_dataset, get_data_home
dataset_path = 'url/url_svmlight.tar.gz'
dataset_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/url/url_svmlight.tar.gz'
_N_FEATURES = 3231961
def load_url_dataset_day(cache_path, days):
"""Loads url dataset from a tar file
Parameters
----------
cache_path : `str`
Path to the tar file
days : `list` or `range`
Days to be loaded
Returns
-------
X : `np.ndarray`
A sparse matrix containing the features
y : `np.ndarray`
An array containing the labels
"""
tar_file = tarfile.open(cache_path, "r:gz")
X, y = None, None
for day in days:
data_filename = 'url_svmlight/Day{}.svm'.format(day)
with tar_file.extractfile(data_filename) as data_file:
X_day, y_day = load_svmlight_file(data_file,
n_features=_N_FEATURES)
if X is None:
X, y = X_day, y_day
else:
X = scipy.sparse.vstack((X, X_day))
y = np.hstack((y, y_day))
return X, y
def download_url_dataset(data_home=None, verbose=False):
"""Downloads URL dataset and stores it locally
Parameters
----------
data_home : `str`, optional, default=None
Specify a download and cache folder for the datasets. If None
        and not configured with TICK_DATASETS environment variable
all tick datasets are stored in '~/tick_datasets' subfolders.
verbose : `bool`, default=True
If True, download progress bar will be printed
Returns
-------
cache_path : `str`
File path of the downloaded data
"""
return download_dataset(dataset_url, dataset_path, data_home=data_home,
verbose=verbose)
def fetch_url_dataset(n_days=120, data_home=None, verbose=True):
"""Loads URL dataset
Uses cache if this dataset has already been downloaded.
Parameters
----------
data_home : `str`, optional, default=None
Specify a download and cache folder for the datasets. If None
        and not configured with TICK_DATASETS environment variable
all tick datasets are stored in '~/tick_datasets' subfolders.
verbose : `bool`, default=True
If True, download progress bar will be printed
Returns
-------
X : `np.ndarray`
A sparse matrix containing the features
y : `np.ndarray`
An array containing the labels
"""
data_home = get_data_home(data_home)
cache_path = os.path.join(data_home, dataset_path)
dataset = None
if os.path.exists(cache_path):
try:
dataset = load_url_dataset_day(cache_path, range(n_days))
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if dataset is None:
download_url_dataset(data_home=data_home, verbose=verbose)
dataset = load_url_dataset_day(cache_path, range(n_days))
return dataset
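# Hedged usage sketch (appended for illustration, not part of the original
# module): note that fetching even a few days downloads the full archive from
# the UCI repository, so the example is left commented out.
#
#     X, y = fetch_url_dataset(n_days=5)
#     print(X.shape, y.shape)  # X is a scipy.sparse matrix, y a numpy array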
| bsd-3-clause |
tjhei/burnman_old2 | burnman/seismic.py | 2 | 9383 | # BurnMan - a lower mantle toolkit
# Copyright (C) 2012, 2013, Heister, T., Unterborn, C., Rose, I. and Cottaar, S.
# Released under GPL v2 or later.
import numpy as np
import tools
import matplotlib.pyplot as plt
import math
class seismic_data:
"""
base class for all seismic models
"""
def __init__(self):
pass
# depth in km
def internal_depth_list(self):
""" returns a sorted list of depths where this seismic data is computed at """
return np.arange(0.,6000.0, 100.0)
def evaluate_all_at(self, depth_list):
""" returns pressure[Pa], density[kg/m^3], Vp[m/s], Vs[m/s] and Vphi[m/s] for a list of depths[m] """
pressures = np.array([self.pressure(r) for r in depth_list])
density = np.array([self.density(r) for r in depth_list])
v_p = np.array([self.v_p(r) for r in depth_list])
v_s = np.array([self.v_s(r) for r in depth_list])
v_phi = np.array([self.v_phi(r) for r in depth_list])
return pressures, density, v_p, v_s, v_phi
def pressure(self, depth):
raise ValueError, "not implemented"
return 0
def v_p(self, depth):
raise ValueError, "not implemented"
return 0
def v_s(self, depth):
raise ValueError, "not implemented"
return 0
def v_phi(self, depth):
raise ValueError, "not implemented"
return 0
def density(self, depth):
raise ValueError, "not implemented"
return 0
def depth(self, pressure):
raise ValueError, "not implemented"
return -1
class radiustable(seismic_data):
"""
this is a base class that gets the information from a table indexed and
sorted by radius. Fill the tables in the constructor after deriving
from this class.
Note: all tables need to be sorted by increasing radius.
    Alternatively, you can also override the _lookup function if you
    want to index by something other than radius, such as depth.
"""
def __init__(self):
seismic_data.__init__(self)
self.table_radius = []
self.table_pressure = []
self.table_density = []
self.table_vp = []
self.table_vs = []
self.earth_radius = 6371.0e3
def internal_depth_list(self):
return (self.earth_radius - self.table_radius)[::-1] #radius is sorted in increasing order, so we need to reverse the depth list
def pressure(self, depth):
return self._lookup(depth, self.table_pressure)
def v_p(self, depth):
return self._lookup(depth, self.table_vp)
def v_s(self, depth):
return self._lookup(depth, self.table_vs)
def v_phi(self, depth):
v_s=self.v_s(depth)
v_p=self.v_p(depth)
return math.sqrt(v_p*v_p-4./3.*v_s*v_s)
def density(self, depth):
return self._lookup(depth, self.table_density)
def depth(self, pressure):
radius = tools.lookup_and_interpolate(self.table_pressure[::-1], self.table_radius[::-1], pressure)
return self.earth_radius - radius
def _lookup(self, depth, value_table):
radius = self.earth_radius - depth
return tools.lookup_and_interpolate(self.table_radius, value_table, radius)
class prem(radiustable):
"""
reads in the table for PREM (input_seismic/prem_table.txt) using the base class radiustable
"""
def __init__(self):
radiustable.__init__(self)
table = tools.read_table("input_seismic/prem_table.txt") # radius, pressure, density, v_p, v_s
table = np.array(table)
self.table_radius = table[:,0]
self.table_pressure = table[:,1]
self.table_density = table[:,2]
self.table_vp = table[:,3]
self.table_vs = table[:,4]
def grav(self,depths):
table = tools.read_table("input_seismic/grav_for_PREM.txt") # radius, g
table = np.array(table)
table_rad = table[:,0]
table_g = table[:,1]
return np.interp(self.earth_radius-depths, table_rad,table_g)
class slow(radiustable):
"""
Inserts the mean profiles for slower regions in the lower mantle (Lekic et al. 2012).
We need to stitch together three tables. Note that prem_lowermantle has a wider range,
so we cut away rows at the top and bottom. Interpolation is not necessary,
because all tables where generated with at the same depths
"""
def __init__(self):
radiustable.__init__(self)
table = tools.read_table("input_seismic/prem_lowermantle.txt")#data is: radius pressure density V_p V_s Q_K Q_G
table = np.array(table)
table[:,0] = table[:,0]
table2 = tools.read_table("input_seismic/swave_slow.txt")
table2 = np.array(table2)
table3 = tools.read_table("input_seismic/pwave_slow.txt")
table3 = np.array(table3)
min_radius = self.earth_radius-max(table2[:,0])
max_radius = self.earth_radius-min(table2[:,0])
table=np.array(filter(lambda x: (x[0]>=min_radius and x[0]<=max_radius), table))
assert(len(table) == len(table2))
assert(len(table) == len(table3))
self.table_radius = table[:,0]
self.table_pressure = table[:,1]
self.table_density = table[:,2]
self.table_vp = table3[:,1]
self.table_vs = table2[:,1]
class fast(radiustable):
"""
Inserts the mean profiles for faster regions in the lower mantle (Lekic et al. 2012).
We need to stitch together three tables. Note that prem_lowermantle has a wider range,
so we cut away rows at the top and bottom. Interpolation is not necessary,
    because all tables were generated at the same depths
"""
def __init__(self):
radiustable.__init__(self)
table = tools.read_table("input_seismic/prem_lowermantle.txt")#data is: radius pressure density V_p V_s Q_K Q_G
table = np.array(table)
table[:,0] = table[:,0]
table2 = tools.read_table("input_seismic/swave_fast.txt")
table2 = np.array(table2)
table3 = tools.read_table("input_seismic/pwave_fast.txt")
table3 = np.array(table3)
min_radius = self.earth_radius-max(table2[:,0])
max_radius = self.earth_radius-min(table2[:,0])
table=np.array(filter(lambda x: (x[0]>=min_radius and x[0]<=max_radius), table))
assert(len(table) == len(table2))
assert(len(table) == len(table3))
self.table_radius = table[:,0]
self.table_pressure = table[:,1]
self.table_density = table[:,2]
self.table_vp = table3[:,1]
self.table_vs = table2[:,1]
# this uses prem_lowermantle table
class prem_test(radiustable):
def __init__(self):
radiustable.__init__(self)
table = tools.read_table("input_seismic/prem_lowermantle.txt")#data is: radius pressure density V_p V_s Q_K Q_G
table = np.array(table)
self.table_radius = table[:,0]
self.table_pressure = table[:,1]
self.table_density = table[:,2]
self.table_vp = table[:,3]
self.table_vs = table[:,4]
def attenuation_correction(v_p,v_s,v_phi,Qs,Qphi):
"""
Applies the attenuation correction following Matas et al. (2007), page 4. This is a minor effect on the velocities
"""
beta = 0.3 # Matas et al. (2007) page 4
Qp = 3./4.*pow((v_p/v_s),2.)*Qs # Matas et al. (2007) page 4
cot=1./np.tan(beta*np.pi/2.)
v_p = v_p*(1.-1./2.*cot*1./Qp) # Matas et al. (2007) page 1
v_s = v_s*(1.-1./2.*cot*1./Qs)
v_phi= v_phi*(1.-1./2.*cot*1./Qphi)
return v_p, v_s, v_phi
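# Hedged usage sketch (not in the original file): apply the correction to
# velocities evaluated from a seismic model above; the quality factors below
# are placeholder values chosen for illustration only.
#
#     v_p_c, v_s_c, v_phi_c = attenuation_correction(v_p, v_s, v_phi, Qs=312., Qphi=57823.)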
# shared variable of prem, so that other routines do not need to create
# prem over and over. See geotherm for example.
prem_model = prem()
if __name__ == "__main__":
#create a seismic dataset from prem:
s=prem()
depths = s.internal_depth_list()
pressures, density, v_p, v_s, v_phi = s.evaluate_all_at(depths)
print depths, pressures, density, v_p, v_s, v_phi
# specify where we want to evaluate, here we map from pressure to depth, because we can
#p = np.arange(1e9,360e9,5e9)
#depths = map(s.depth, p)
#we could also just specify some depth levels directly like this:
#depths = np.arange(35e3,5600e3,100e3)
#now evaluate everything at the given depths levels (using interpolation)
#pressures, density, v_p, v_s, v_phi = s.evaluate_all_at(depths)
# plot vs and vp and v_phi (note that v_phi is computed!)
plt.plot(depths/1.e3,v_p/1.e3,'+-r', label='v_p')
plt.plot(depths/1.e3,v_s/1.e3,'+-b', label='v_s')
plt.plot(depths/1.e3,v_phi/1.e3,'--g', label='v_phi')
plt.legend()
plt.xlabel('depth in km')
plt.ylabel('km/s')
plt.show()
s1=prem()
depths=s1.internal_depth_list()
pressures, density, v_p, v_s, v_phi = s1.evaluate_all_at(depths)
plt.plot(depths/1.e3,v_p/1.e3,'+-r', label='v_p')
plt.plot(depths/1.e3,v_s/1.e3,'+-b', label='v_s')
plt.plot(depths/1.e3,v_phi/1.e3,'--g', label='v_phi')
s2=prem_test()
depths=s2.internal_depth_list()
pressures, density, v_p, v_s, v_phi = s2.evaluate_all_at(depths)
plt.plot(depths,v_p/1.e3,'x-r', label='v_p')
plt.plot(depths,v_s/1.e3,'x-b', label='v_s')
plt.plot(depths,v_phi/1.e3,'x-g', label='v_phi')
plt.show()
| gpl-2.0 |
datalyze-solutions/pandas-qt | setup.py | 1 | 2771 |
from __future__ import print_function
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import io
import codecs
import os
import re
import sys
# TODO: sip is only needed for PyQt4, they should be imported together.
try:
import sip
except ImportError as e:
raise ImportError, "install sip first (comming with PyQt4)"
try:
import PyQt4
except ImportError as e:
# TODO: try to import PySide.
raise ImportError, "install PyQt4 or PySide"
here = os.path.abspath(os.path.dirname(__file__))
version_file = open(os.path.join(here, 'pandasqt', '__init__.py'), 'rU')
__version__ = re.sub(
r".*\b__version__\s+=\s+'([^']+)'.*",
r'\1',
[ line.strip() for line in version_file if '__version__' in line ].pop(0)
)
version_file.close()
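# Illustration (assumed example, not part of the package): if pandasqt/__init__.py
# contains a line such as
#   __version__ = '0.2.0'
# the substitution above reduces that line to the bare version string, so
# __version__ here becomes '0.2.0'.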
def read(*filenames, **kwargs):
encoding = kwargs.get('encoding', 'utf-8')
sep = kwargs.get('sep', '\n')
buf = []
for filename in filenames:
with io.open(filename, encoding=encoding) as f:
buf.append(f.read())
return sep.join(buf)
long_description = read('README')
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
tests_require = ['easygui', 'pandas == 0.17.1', 'pyside', 'pytest', 'pytest-cov', 'pytest-qt', 'python-magic==0.4.6']
setup(
name='pandas-qt',
version=__version__,
url='https://github.com/datalyze-solutions/pandas-qt',
license='MIT License',
namespace_packages = ['pandasqt'],
author='Matthias Ludwig, Marcel Radischat',
tests_require=tests_require,
install_requires=['easygui', 'pandas==0.17.1', 'pytest', 'pytest-qt==1.2.2', 'pytest-cov', 'python-magic==0.4.6'],
cmdclass={'test': PyTest},
author_email='[email protected]',
description='Utilities to use pandas (the data analysis / manipulation library for Python) with Qt.',
long_description=long_description,
include_package_data=True,
packages=['pandasqt'],
platforms='any',
test_suite='tests',
classifiers = [
'Programming Language :: Python',
'Development Status :: 4 - Beta',
'Natural Language :: German',
'Environment :: X11 Applications :: Qt',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: User Interfaces'
],
extras_require={
'testing': tests_require,
}
) | mit |
datachand/h2o-3 | h2o-py/tests/testdir_algos/glm/pyunit_link_functions_poissonGLM.py | 5 | 2254 | import sys
sys.path.insert(1, "../../../")
import h2o, tests
import pandas as pd
import zipfile
import statsmodels.api as sm
def link_functions_poisson():
print("Read in prostate data.")
h2o_data = h2o.import_file(path=h2o.locate("smalldata/prostate/prostate_complete.csv.zip"))
sm_data = pd.read_csv(zipfile.ZipFile(h2o.locate("smalldata/prostate/prostate_complete.csv.zip")).
open("prostate_complete.csv")).as_matrix()
sm_data_response = sm_data[:,9]
sm_data_features = sm_data[:,1:9]
print("Testing for family: POISSON")
print("Set variables for h2o.")
myY = "GLEASON"
myX = ["ID","AGE","RACE","CAPSULE","DCAPS","PSA","VOL","DPROS"]
print("Create h2o model with canonical link: LOG")
h2o_model_log = h2o.glm(x=h2o_data[myX], y=h2o_data[myY], family="poisson", link="log",alpha=[0.5], Lambda=[0])
print("Create statsmodel model with canonical link: LOG")
sm_model_log = sm.GLM(endog=sm_data_response, exog=sm_data_features,
family=sm.families.Poisson(sm.families.links.log)).fit()
print("Compare model deviances for link function log")
h2o_deviance_log = h2o_model_log.residual_deviance() / h2o_model_log.null_deviance()
sm_deviance_log = sm_model_log.deviance / sm_model_log.null_deviance
    assert h2o_deviance_log - sm_deviance_log < 0.01, "expected h2o to have an equivalent or better deviance measure"
print("Create h2o models with link: IDENTITY")
h2o_model_id = h2o.glm(x=h2o_data[myX], y=h2o_data[myY], family="poisson", link="identity",alpha=[0.5], Lambda=[0])
print("Create statsmodel models with link: IDENTITY")
sm_model_id = sm.GLM(endog=sm_data_response, exog=sm_data_features,
family=sm.families.Poisson(sm.families.links.identity)).fit()
print("Compare model deviances for link function identity")
h2o_deviance_id = h2o_model_id.residual_deviance() / h2o_model_id.null_deviance()
sm_deviance_id = sm_model_id.deviance / sm_model_id.null_deviance
    assert h2o_deviance_id - sm_deviance_id < 0.01, "expected h2o to have an equivalent or better deviance measure"
if __name__ == "__main__":
tests.run_test(sys.argv, link_functions_poisson)
| apache-2.0 |
raincoatrun/ThinkStats2 | code/chap12soln.py | 68 | 4459 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import pandas
import numpy as np
import statsmodels.formula.api as smf
import thinkplot
import thinkstats2
import regression
import timeseries
def RunQuadraticModel(daily):
"""Runs a linear model of prices versus years.
daily: DataFrame of daily prices
returns: model, results
"""
daily['years2'] = daily.years**2
model = smf.ols('ppg ~ years + years2', data=daily)
results = model.fit()
return model, results
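# Note (added for clarity): the formula above fits
#   ppg = b0 + b1 * years + b2 * years**2
# so results.params holds the coefficients indexed as
# ['Intercept', 'years', 'years2'].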
def PlotQuadraticModel(daily, name):
"""
"""
model, results = RunQuadraticModel(daily)
regression.SummarizeResults(results)
timeseries.PlotFittedValues(model, results, label=name)
thinkplot.Save(root='timeseries11',
title='fitted values',
xlabel='years',
xlim=[-0.1, 3.8],
ylabel='price per gram ($)')
timeseries.PlotResidualPercentiles(model, results)
thinkplot.Save(root='timeseries12',
title='residuals',
xlabel='years',
ylabel='price per gram ($)')
years = np.linspace(0, 5, 101)
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
timeseries.PlotPredictions(daily, years, func=RunQuadraticModel)
thinkplot.Save(root='timeseries13',
title='predictions',
xlabel='years',
xlim=[years[0]-0.1, years[-1]+0.1],
ylabel='price per gram ($)')
def PlotEwmaPredictions(daily, name):
"""
"""
# use EWMA to estimate slopes
filled = timeseries.FillMissing(daily)
filled['slope'] = pandas.ewma(filled.ppg.diff(), span=180)
filled[-1:]
# extract the last inter and slope
start = filled.index[-1]
inter = filled.ewma[-1]
slope = filled.slope[-1]
# reindex the DataFrame, adding a year to the end
dates = pandas.date_range(filled.index.min(),
filled.index.max() + np.timedelta64(365, 'D'))
predicted = filled.reindex(dates)
# generate predicted values and add them to the end
predicted['date'] = predicted.index
one_day = np.timedelta64(1, 'D')
predicted['days'] = (predicted.date - start) / one_day
predict = inter + slope * predicted.days
predicted.ewma.fillna(predict, inplace=True)
# plot the actual values and predictions
thinkplot.Scatter(daily.ppg, alpha=0.1, label=name)
thinkplot.Plot(predicted.ewma)
thinkplot.Save()
class SerialCorrelationTest(thinkstats2.HypothesisTest):
"""Tests serial correlations by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: tuple of xs and ys
"""
series, lag = data
test_stat = abs(thinkstats2.SerialCorr(series, lag))
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
series, lag = self.data
permutation = series.reindex(np.random.permutation(series.index))
return permutation, lag
def TestSerialCorr(daily):
"""Tests serial correlations in daily prices and their residuals.
daily: DataFrame of daily prices
"""
# test the correlation between consecutive prices
series = daily.ppg
test = SerialCorrelationTest((series, 1))
pvalue = test.PValue()
print(test.actual, pvalue)
# test for serial correlation in residuals of the linear model
_, results = timeseries.RunLinearModel(daily)
series = results.resid
test = SerialCorrelationTest((series, 1))
pvalue = test.PValue()
print(test.actual, pvalue)
# test for serial correlation in residuals of the quadratic model
_, results = RunQuadraticModel(daily)
series = results.resid
test = SerialCorrelationTest((series, 1))
pvalue = test.PValue()
print(test.actual, pvalue)
def main(name):
transactions = timeseries.ReadData()
dailies = timeseries.GroupByQualityAndDay(transactions)
name = 'high'
daily = dailies[name]
PlotQuadraticModel(daily, name)
TestSerialCorr(daily)
PlotEwmaPredictions(daily, name)
if __name__ == '__main__':
import sys
main(*sys.argv)
| gpl-3.0 |
yyjiang/scikit-learn | examples/manifold/plot_manifold_sphere.py | 258 | 5101 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=============================================
Manifold Learning methods on a severed sphere
=============================================
An application of the different :ref:`manifold` techniques
on a spherical data-set. Here one can see the use of
dimensionality reduction in order to gain some intuition
regarding the manifold learning methods. Regarding the dataset,
the poles are cut from the sphere, as well as a thin slice down its
side. This enables the manifold learning techniques to
'spread it open' whilst projecting it onto two dimensions.
For a similar example, where the methods are applied to the
S-curve dataset, see :ref:`example_manifold_plot_compare_methods.py`
Note that the purpose of the :ref:`MDS <multidimensional_scaling>` is
to find a low-dimensional representation of the data (here 2D) in
which the distances closely respect the distances in the original
high-dimensional space. Unlike other manifold-learning algorithms,
it does not seek an isotropic representation of the data in
the low-dimensional space. Here the manifold problem closely matches
that of representing a flat map of the Earth, as with
`map projection <http://en.wikipedia.org/wiki/Map_projection>`_
"""
# Author: Jaques Grobler <[email protected]>
# License: BSD 3 clause
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold
from sklearn.utils import check_random_state
# Next line to silence pyflakes.
Axes3D
# Variables for manifold learning.
n_neighbors = 10
n_samples = 1000
# Create our sphere.
random_state = check_random_state(0)
p = random_state.rand(n_samples) * (2 * np.pi - 0.55)
t = random_state.rand(n_samples) * np.pi
# Sever the poles from the sphere.
indices = ((t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8))))
colors = p[indices]
x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \
np.sin(t[indices]) * np.sin(p[indices]), \
np.cos(t[indices])
# Plot our dataset.
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
ax = fig.add_subplot(251, projection='3d')
ax.scatter(x, y, z, c=p[indices], cmap=plt.cm.rainbow)
try:
# compatibility matplotlib < 1.0
ax.view_init(40, -10)
except:
pass
sphere_data = np.array([x, y, z]).T
# Perform Locally Linear Embedding Manifold learning
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
trans_data = manifold\
.LocallyLinearEmbedding(n_neighbors, 2,
method=method).fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Isomap Manifold learning.
t0 = time()
trans_data = manifold.Isomap(n_neighbors, n_components=2)\
.fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % ('ISO', t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % ('Isomap', t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Multi-dimensional scaling.
t0 = time()
mds = manifold.MDS(2, max_iter=100, n_init=1)
trans_data = mds.fit_transform(sphere_data).T
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Spectral Embedding.
t0 = time()
se = manifold.SpectralEmbedding(n_components=2,
n_neighbors=n_neighbors)
trans_data = se.fit_transform(sphere_data).T
t1 = time()
print("Spectral Embedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("Spectral Embedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform t-distributed stochastic neighbor embedding.
t0 = time()
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
trans_data = tsne.fit_transform(sphere_data).T
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
3324fr/spinalcordtoolbox | scripts/sct_viewer.py | 1 | 40399 | #!/usr/bin/env python
#########################################################################################
#
# Visualizer for MRI volumes
#
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2015 Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Benjamin De Leener
# Created: 2015-01-30
#
# Notes on how to use classes in this script.
# If you are interested in manually selecting some points in an image, you can use the following code.
# from sct_viewer import ClickViewer
# from msct_image import Image
#
# im_input = Image('my_image.nii.gz')
#
# im_input_SAL = im_input.copy()
# # SAL orientation is mandatory
# im_input_SAL.change_orientation('SAL')
# # The viewer is composed of a primary plot and a secondary plot. The primary plot is the one you will click points in.
# # The secondary plot will help you go through slices in another dimension to guide the manual selection.
# viewer = ClickViewer(im_input_SAL, orientation_subplot=['sag', 'ax'])
# viewer.number_of_slices = X # Change X appropriately.
# viewer.gap_inter_slice = Y # this number should reflect image spacing
# viewer.calculate_list_slices()
# # start the viewer that asks the user to enter a few points along the spinal cord
# mask_points = viewer.start()
# print mask_points
#
# About the license: see the file LICENSE.TXT
#########################################################################################
import sys
from msct_parser import Parser
from msct_image import Image
from bisect import bisect
from numpy import arange, max, pad, linspace, mean, median, std, percentile
import numpy as np
from msct_types import *
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib import cm
import sct_utils as sct
from time import time
from copy import copy
from matplotlib.widgets import Slider, Button, RadioButtons
import webbrowser
class SinglePlot:
"""
This class manages mouse events on one image.
"""
def __init__(self, ax, images, viewer, view=2, display_cross='hv', im_params=None):
self.axes = ax
self.images = images # this is a list of images
self.viewer = viewer
self.view = view
self.display_cross = display_cross
self.image_dim = self.images[0].data.shape
self.figs = []
self.cross_to_display = None
self.aspect_ratio = None
for i, image in enumerate(images):
data_to_display = None
if self.view == 1:
self.cross_to_display = [[[self.viewer.current_point.y, self.viewer.current_point.y], [-10000, 10000]],
[[-10000, 10000], [self.viewer.current_point.z, self.viewer.current_point.z]]]
self.aspect_ratio = self.viewer.aspect_ratio[0]
data_to_display = image.data[int(self.image_dim[0] / 2), :, :]
elif self.view == 2:
self.cross_to_display = [[[self.viewer.current_point.x, self.viewer.current_point.x], [-10000, 10000]],
[[-10000, 10000], [self.viewer.current_point.z, self.viewer.current_point.z]]]
self.aspect_ratio = self.viewer.aspect_ratio[1]
data_to_display = image.data[:, int(self.image_dim[1] / 2), :]
elif self.view == 3:
self.cross_to_display = [[[self.viewer.current_point.x, self.viewer.current_point.x], [-10000, 10000]],
[[-10000, 10000], [self.viewer.current_point.y, self.viewer.current_point.y]]]
self.aspect_ratio = self.viewer.aspect_ratio[2]
data_to_display = image.data[:, :, int(self.image_dim[2] / 2)]
if str(i) in im_params.images_parameters:
my_cmap = copy(cm.get_cmap(im_params.images_parameters[str(i)].cmap))
my_interpolation = im_params.images_parameters[str(i)].interp
my_alpha = float(im_params.images_parameters[str(i)].alpha)
else:
my_cmap = cm.get_cmap('gray')
my_interpolation = 'nearest'
my_alpha = 1.0
my_cmap.set_under('b', alpha=0)
self.figs.append(self.axes.imshow(data_to_display, aspect=self.aspect_ratio, alpha=my_alpha))
self.figs[-1].set_cmap(my_cmap)
self.figs[-1].set_interpolation(my_interpolation)
self.axes.set_axis_bgcolor('black')
self.axes.set_xticks([])
self.axes.set_yticks([])
self.line_horizontal = Line2D(self.cross_to_display[1][1], self.cross_to_display[1][0], color='white')
self.line_vertical = Line2D(self.cross_to_display[0][1], self.cross_to_display[0][0], color='white')
if 'h' in display_cross:
self.axes.add_line(self.line_horizontal)
if 'v' in display_cross:
self.axes.add_line(self.line_vertical)
self.zoom_factor = 1.0
def connect(self):
"""
connect to all the events we need
:return:
"""
self.cidpress_click = self.figs[0].figure.canvas.mpl_connect('button_press_event', self.on_press)
self.cidscroll = self.figs[0].figure.canvas.mpl_connect('scroll_event', self.on_scroll)
self.cidrelease = self.figs[0].figure.canvas.mpl_connect('button_release_event', self.on_release)
self.cidmotion = self.figs[0].figure.canvas.mpl_connect('motion_notify_event', self.on_motion)
def draw(self):
self.figs[0].figure.canvas.draw()
def update_slice(self, target, data_update=True):
"""
        This function changes the viewer to update the current slice
:param target: number of the slice to go on
:param data_update: False if you don't want to update data
:return:
"""
if isinstance(target, list):
target_slice = target[self.view - 1]
list_remaining_views = list([0, 1, 2])
list_remaining_views.remove(self.view - 1)
self.cross_to_display[0][0] = [target[list_remaining_views[0]], target[list_remaining_views[0]]]
self.cross_to_display[1][1] = [target[list_remaining_views[1]], target[list_remaining_views[1]]]
else:
target_slice = target
if self.view == 1:
if 0 <= target_slice < self.images[0].data.shape[0]:
if data_update:
for i, image in enumerate(self.images):
self.figs[i].set_data(image.data[target_slice, :, :])
if 'v' in self.display_cross:
self.line_vertical.set_ydata(self.cross_to_display[0][0])
if 'h' in self.display_cross:
self.line_horizontal.set_xdata(self.cross_to_display[1][1])
elif self.view == 2:
if 0 <= target_slice < self.images[0].data.shape[1]:
if data_update:
for i, image in enumerate(self.images):
self.figs[i].set_data(image.data[:, target_slice, :])
if 'v' in self.display_cross:
self.line_vertical.set_ydata(self.cross_to_display[0][0])
if 'h' in self.display_cross:
self.line_horizontal.set_xdata(self.cross_to_display[1][1])
elif self.view == 3:
if 0 <= target_slice < self.images[0].data.shape[2]:
if data_update:
for i, image in enumerate(self.images):
self.figs[i].set_data(image.data[:, :, target_slice])
if 'v' in self.display_cross:
self.line_vertical.set_ydata(self.cross_to_display[0][0])
if 'h' in self.display_cross:
self.line_horizontal.set_xdata(self.cross_to_display[1][1])
self.figs[0].figure.canvas.draw()
def on_press(self, event):
"""
        when pressing on the screen, add the point to a list, then change the current slice
if finished, close the window and send the result
:param event:
:return:
"""
if event.button == 1 and event.inaxes == self.axes:
self.viewer.on_press(event, self)
return
def change_intensity(self, min_intensity, max_intensity, id_image=0):
self.figs[id_image].set_clim(min_intensity, max_intensity)
self.figs[id_image].figure.canvas.draw()
def on_motion(self, event):
if event.button == 1 and event.inaxes == self.axes:
return self.viewer.on_motion(event, self)
elif event.button == 3 and event.inaxes == self.axes:
return self.viewer.change_intensity(event, self)
else:
return
def on_release(self, event):
if event.button == 1:
return self.viewer.on_release(event, self)
elif event.button == 3:
return self.viewer.change_intensity(event, self)
else:
return
def update_xy_lim(self, x_center=None, y_center=None, x_scale_factor=1.0, y_scale_factor=1.0, zoom=True):
# get the current x and y limits
cur_xlim = self.axes.get_xlim()
cur_ylim = self.axes.get_ylim()
if x_center is None:
x_center = (cur_xlim[1] - cur_xlim[0]) / 2.0
if y_center is None:
y_center = (cur_ylim[1] - cur_ylim[0]) / 2.0
# Get distance from the cursor to the edge of the figure frame
x_left = x_center - cur_xlim[0]
x_right = cur_xlim[1] - x_center
y_top = y_center - cur_ylim[0]
y_bottom = cur_ylim[1] - y_center
if zoom:
scale_factor = (x_scale_factor + y_scale_factor) / 2.0
if 0.005 < self.zoom_factor * scale_factor <= 3.0:
self.zoom_factor *= scale_factor
self.axes.set_xlim([x_center - x_left * x_scale_factor, x_center + x_right * x_scale_factor])
self.axes.set_ylim([y_center - y_top * y_scale_factor, y_center + y_bottom * y_scale_factor])
self.figs[0].figure.canvas.draw()
else:
self.axes.set_xlim([x_center - x_left * x_scale_factor, x_center + x_right * x_scale_factor])
self.axes.set_ylim([y_center - y_top * y_scale_factor, y_center + y_bottom * y_scale_factor])
self.figs[0].figure.canvas.draw()
def on_scroll(self, event):
"""
        when scrolling with the wheel, the image is zoomed toward the cursor position on the screen
:param event:
:return:
"""
if event.inaxes == self.axes:
base_scale = 0.5
xdata, ydata = event.xdata, event.ydata
if event.button == 'up':
# deal with zoom in
scale_factor = 1 / base_scale
elif event.button == 'down':
# deal with zoom out
scale_factor = base_scale
else:
# deal with something that should never happen
scale_factor = 1.0
print event.button
self.update_xy_lim(x_center=xdata, y_center=ydata,
x_scale_factor=scale_factor, y_scale_factor=scale_factor,
zoom=True)
return
class Viewer(object):
def __init__(self, list_images, visualization_parameters=None):
self.images = []
for im in list_images:
if isinstance(im, Image):
self.images.append(im)
else:
print "Error, one of the images is actually not an image..."
# TODO: check same space
# TODO: check if at least one image
self.im_params = visualization_parameters
# initialisation of plot
self.fig = plt.figure(figsize=(8, 8))
self.fig.subplots_adjust(bottom=0.1, left=0.1)
self.fig.patch.set_facecolor('lightgrey')
# pad the image so that it is square in axial view (useful for zooming)
self.image_dim = self.images[0].data.shape
nx, ny, nz, nt, px, py, pz, pt = self.images[0].dim
self.im_spacing = [px, py, pz]
self.aspect_ratio = [float(self.im_spacing[1]) / float(self.im_spacing[2]),
float(self.im_spacing[0]) / float(self.im_spacing[2]),
float(self.im_spacing[0]) / float(self.im_spacing[1])]
self.offset = [0.0, 0.0, 0.0]
self.current_point = Coordinate([int(nx / 2), int(ny / 2), int(nz / 2)])
self.windows = []
self.press = [0, 0]
self.mean_intensity = []
self.std_intensity = []
self.last_update = time()
        self.update_freq = 1.0/15.0  # throttle updates to ~15 Hz
def compute_offset(self):
array_dim = [self.image_dim[0]*self.im_spacing[0], self.image_dim[1]*self.im_spacing[1], self.image_dim[2]*self.im_spacing[2]]
index_max = np.argmax(array_dim)
max_size = array_dim[index_max]
self.offset = [int(round((max_size - array_dim[0]) / self.im_spacing[0]) / 2),
int(round((max_size - array_dim[1]) / self.im_spacing[1]) / 2),
int(round((max_size - array_dim[2]) / self.im_spacing[2]) / 2)]
def pad_data(self):
for image in self.images:
image.data = pad(image.data,
((self.offset[0], self.offset[0]),
(self.offset[1], self.offset[1]),
(self.offset[2], self.offset[2])),
'constant',
constant_values=(0, 0))
def setup_intensity(self):
# TODO: change for segmentation images
for i, image in enumerate(self.images):
if str(i) in self.im_params.images_parameters:
vmin = self.im_params.images_parameters[str(i)].vmin
vmax = self.im_params.images_parameters[str(i)].vmax
vmean = self.im_params.images_parameters[str(i)].vmean
if self.im_params.images_parameters[str(i)].vmode == 'percentile':
flattened_volume = image.flatten()
first_percentile = percentile(flattened_volume[flattened_volume > 0], int(vmin))
last_percentile = percentile(flattened_volume[flattened_volume > 0], int(vmax))
mean_intensity = percentile(flattened_volume[flattened_volume > 0], int(vmean))
std_intensity = last_percentile - first_percentile
elif self.im_params.images_parameters[str(i)].vmode == 'mean-std':
mean_intensity = (float(vmax) + float(vmin)) / 2.0
std_intensity = (float(vmax) - float(vmin)) / 2.0
else:
flattened_volume = image.flatten()
first_percentile = percentile(flattened_volume[flattened_volume > 0], 0)
last_percentile = percentile(flattened_volume[flattened_volume > 0], 99)
mean_intensity = percentile(flattened_volume[flattened_volume > 0], 98)
std_intensity = last_percentile - first_percentile
self.mean_intensity.append(mean_intensity)
self.std_intensity.append(std_intensity)
min_intensity = mean_intensity - std_intensity
max_intensity = mean_intensity + std_intensity
for window in self.windows:
window.figs[i].set_clim(min_intensity, max_intensity)
def is_point_in_image(self, target_point):
return 0 <= target_point.x < self.image_dim[0] and 0 <= target_point.y < self.image_dim[1] and 0 <= target_point.z < self.image_dim[2]
def change_intensity(self, event, plot=None):
if abs(event.xdata - self.press[0]) < 1 and abs(event.ydata - self.press[1]) < 1:
self.press = event.xdata, event.ydata
return
if time() - self.last_update <= self.update_freq:
return
self.last_update = time()
xlim, ylim = self.windows[0].axes.get_xlim(), self.windows[0].axes.get_ylim()
mean_intensity_factor = (event.xdata - xlim[0]) / float(xlim[1] - xlim[0])
std_intensity_factor = (event.ydata - ylim[1]) / float(ylim[0] - ylim[1])
mean_factor = self.mean_intensity[0] - (mean_intensity_factor - 0.5) * self.mean_intensity[0] * 3.0
std_factor = self.std_intensity[0] + (std_intensity_factor - 0.5) * self.std_intensity[0] * 2.0
min_intensity = mean_factor - std_factor
max_intensity = mean_factor + std_factor
for window in self.windows:
window.change_intensity(min_intensity, max_intensity)
def get_event_coordinates(self, event, plot=None):
point = None
if plot.view == 1:
point = Coordinate([self.current_point.x,
int(round(event.ydata)),
int(round(event.xdata)), 1])
elif plot.view == 2:
point = Coordinate([int(round(event.ydata)),
self.current_point.y,
int(round(event.xdata)), 1])
elif plot.view == 3:
point = Coordinate([int(round(event.ydata)),
int(round(event.xdata)),
self.current_point.z, 1])
return point
def draw(self):
for window in self.windows:
window.fig.figure.canvas.draw()
def start(self):
plt.show()
class ThreeViewer(Viewer):
"""
    This class is a visualizer for volumes (3D images) and asks the user to click on axial slices.
Assumes AIL orientation
"""
def __init__(self, list_images, visualization_parameters=None):
if isinstance(list_images, Image):
list_images = [list_images]
if not visualization_parameters:
visualization_parameters = ParamMultiImageVisualization([ParamImageVisualization()])
super(ThreeViewer, self).__init__(list_images, visualization_parameters)
self.compute_offset()
self.pad_data()
self.current_point = Coordinate([int(self.images[0].data.shape[0] / 2), int(self.images[0].data.shape[1] / 2), int(self.images[0].data.shape[2] / 2)])
ax = self.fig.add_subplot(222)
self.windows.append(SinglePlot(ax=ax, images=self.images, viewer=self, view=1, im_params=visualization_parameters)) # SAL --> axial
ax = self.fig.add_subplot(223)
self.windows.append(SinglePlot(ax=ax, images=self.images, viewer=self, view=2, im_params=visualization_parameters)) # SAL --> frontal
ax = self.fig.add_subplot(221)
self.windows.append(SinglePlot(ax=ax, images=self.images, viewer=self, view=3, im_params=visualization_parameters)) # SAL --> sagittal
for window in self.windows:
window.connect()
self.setup_intensity()
def move(self, event, plot):
is_in_axes = False
for window in self.windows:
if event.inaxes == window.axes:
is_in_axes = True
if not is_in_axes:
return
if event.xdata and abs(event.xdata - self.press[0]) < 0.5 and abs(event.ydata - self.press[1]) < 0.5:
self.press = event.xdata, event.ydata
return
if time() - self.last_update <= self.update_freq:
return
self.last_update = time()
self.current_point = self.get_event_coordinates(event, plot)
point = [self.current_point.x, self.current_point.y, self.current_point.z]
for window in self.windows:
if window is plot:
window.update_slice(point, data_update=False)
else:
window.update_slice(point, data_update=True)
self.press = event.xdata, event.ydata
return
def on_press(self, event, plot=None):
if event.button == 1:
return self.move(event, plot)
else:
return
def on_motion(self, event, plot=None):
if event.button == 1:
return self.move(event, plot)
else:
return
def on_release(self, event, plot=None):
if event.button == 1:
return self.move(event, plot)
else:
return
class ClickViewer(Viewer):
"""
    This class is a visualizer for volumes (3D images) and asks the user to click on axial slices.
Assumes SAL orientation
orientation_subplot: list of two views that will be plotted next to each other. The first view is the main one (right) and the second view is the smaller one (left). Orientations are: ax, sag, cor.
"""
def __init__(self, list_images, visualization_parameters=None, orientation_subplot=['ax', 'sag']):
self.orientation = {'ax': 1, 'cor': 2, 'sag': 3}
if isinstance(list_images, Image):
list_images = [list_images]
if not visualization_parameters:
visualization_parameters = ParamMultiImageVisualization([ParamImageVisualization()])
super(ClickViewer, self).__init__(list_images, visualization_parameters)
self.primary_subplot = orientation_subplot[0]
self.secondary_subplot = orientation_subplot[1]
self.current_slice = 0
self.number_of_slices = 0
self.gap_inter_slice = 0
self.compute_offset()
self.pad_data()
self.current_point = Coordinate([int(self.images[0].data.shape[0] / 2), int(self.images[0].data.shape[1] / 2), int(self.images[0].data.shape[2] / 2)])
# display axes, specific to viewer
import matplotlib.gridspec as gridspec
gs = gridspec.GridSpec(1, 3)
# main plot on the right
ax = self.fig.add_subplot(gs[0, 1:], axisbg='k')
self.windows.append(SinglePlot(ax, self.images, self, view=self.orientation[self.primary_subplot], display_cross='', im_params=visualization_parameters))
self.plot_points, = self.windows[0].axes.plot([], [], '.r', markersize=10)
if self.primary_subplot == 'ax':
self.windows[0].axes.set_xlim([0, self.images[0].data.shape[2]])
self.windows[0].axes.set_ylim([self.images[0].data.shape[1], 0])
elif self.primary_subplot == 'cor':
self.windows[0].axes.set_xlim([0, self.images[0].data.shape[2]])
self.windows[0].axes.set_ylim([self.images[0].data.shape[0], 0])
elif self.primary_subplot == 'sag':
self.windows[0].axes.set_xlim([0, self.images[0].data.shape[0]])
self.windows[0].axes.set_ylim([self.images[0].data.shape[1], 0])
# smaller plot on the left
display_cross = ''
if self.primary_subplot == 'ax':
display_cross = 'v'
elif self.primary_subplot == 'cor':
display_cross = 'h'
elif self.primary_subplot == 'sag':
display_cross = 'h'
ax = self.fig.add_subplot(gs[0, 0], axisbg='k')
self.windows.append(SinglePlot(ax, self.images, self, view=self.orientation[self.secondary_subplot], display_cross=display_cross, im_params=visualization_parameters))
for window in self.windows:
window.connect()
self.ax_help = plt.axes([0.81, 0.05, 0.1, 0.075])
button_help = Button(self.ax_help, 'Help')
self.fig.canvas.mpl_connect('button_press_event', self.help)
self.help_url = 'https://sourceforge.net/p/spinalcordtoolbox/wiki/Home/'
# specialized for Click viewer
self.list_points = []
self.list_points_useful_notation = ''
# compute slices to display
self.list_slices = []
self.calculate_list_slices()
# variable to check if all slices have been processed
self.all_processed = False
self.setup_intensity()
def calculate_list_slices(self):
if self.number_of_slices != 0 and self.gap_inter_slice != 0: # mode multiple points with fixed gap
central_slice = int(self.image_dim[self.orientation[self.primary_subplot]-1] / 2)
first_slice = central_slice - (self.number_of_slices / 2) * self.gap_inter_slice
last_slice = central_slice + (self.number_of_slices / 2) * self.gap_inter_slice
if first_slice < 0:
first_slice = 0
if last_slice >= self.image_dim[self.orientation[self.primary_subplot]-1]:
last_slice = self.image_dim[self.orientation[self.primary_subplot]-1] - 1
self.list_slices = [int(item) for item in
linspace(first_slice, last_slice, self.number_of_slices, endpoint=True)]
elif self.number_of_slices != 0:
self.list_slices = [int(item) for item in
linspace(0, self.image_dim[self.orientation[self.primary_subplot]-1] - 1, self.number_of_slices, endpoint=True)]
if self.list_slices[-1] != self.image_dim[self.orientation[self.primary_subplot]-1] - 1:
self.list_slices.append(self.image_dim[self.orientation[self.primary_subplot]-1] - 1)
elif self.gap_inter_slice != 0:
self.list_slices = list(arange(0, self.image_dim[self.orientation[self.primary_subplot]-1], self.gap_inter_slice))
if self.list_slices[-1] != self.image_dim[self.orientation[self.primary_subplot]-1] - 1:
self.list_slices.append(self.image_dim[self.orientation[self.primary_subplot]-1] - 1)
else:
self.gap_inter_slice = int(max([round(self.image_dim[self.orientation[self.primary_subplot]-1] / 15.0), 1]))
self.number_of_slices = int(round(self.image_dim[self.orientation[self.primary_subplot]-1] / self.gap_inter_slice))
self.list_slices = [int(item) for item in
linspace(0, self.image_dim[self.orientation[self.primary_subplot]-1] - 1, self.number_of_slices, endpoint=True)]
if self.list_slices[-1] != self.image_dim[self.orientation[self.primary_subplot]-1] - 1:
self.list_slices.append(self.image_dim[self.orientation[self.primary_subplot]-1] - 1)
point = [self.current_point.x, self.current_point.y, self.current_point.z]
point[self.orientation[self.primary_subplot]-1] = self.list_slices[self.current_slice]
for window in self.windows:
if window.view == self.orientation[self.secondary_subplot]:
window.update_slice(point, data_update=False)
else:
window.update_slice(point, data_update=True)
self.windows[1].axes.set_title('Click and hold\nto move around')
self.title = self.windows[0].axes.set_title('Please select a new point on slice ' + str(self.list_slices[self.current_slice]) + '/' + str(
self.image_dim[self.orientation[self.primary_subplot]-1] - 1) + ' (' + str(self.current_slice + 1) + '/' + str(len(self.list_slices)) + ')')
def compute_offset(self):
if self.primary_subplot == 'ax':
array_dim = [self.image_dim[1] * self.im_spacing[1], self.image_dim[2] * self.im_spacing[2]]
index_max = np.argmax(array_dim)
max_size = array_dim[index_max]
self.offset = [0,
int(round((max_size - array_dim[0]) / self.im_spacing[1]) / 2),
int(round((max_size - array_dim[1]) / self.im_spacing[2]) / 2)]
elif self.primary_subplot == 'cor':
array_dim = [self.image_dim[0] * self.im_spacing[0], self.image_dim[2] * self.im_spacing[2]]
index_max = np.argmax(array_dim)
max_size = array_dim[index_max]
self.offset = [int(round((max_size - array_dim[0]) / self.im_spacing[0]) / 2),
0,
int(round((max_size - array_dim[1]) / self.im_spacing[2]) / 2)]
elif self.primary_subplot == 'sag':
array_dim = [self.image_dim[0] * self.im_spacing[0], self.image_dim[1] * self.im_spacing[1]]
index_max = np.argmax(array_dim)
max_size = array_dim[index_max]
self.offset = [int(round((max_size - array_dim[0]) / self.im_spacing[0]) / 2),
int(round((max_size - array_dim[1]) / self.im_spacing[1]) / 2),
0]
def on_press(self, event, plot=None):
# below is the subplot that refers to the label collection
if event.inaxes and plot.view == self.orientation[self.primary_subplot]:
if self.primary_subplot == 'ax':
target_point = Coordinate([int(self.list_slices[self.current_slice]), int(event.ydata) - self.offset[1], int(event.xdata) - self.offset[2], 1])
elif self.primary_subplot == 'cor':
target_point = Coordinate([int(event.ydata) - self.offset[0], int(self.list_slices[self.current_slice]), int(event.xdata) - self.offset[2], 1])
elif self.primary_subplot == 'sag':
target_point = Coordinate([int(event.ydata) - self.offset[0], int(event.xdata) - self.offset[1], int(self.list_slices[self.current_slice]), 1])
if self.is_point_in_image(target_point):
self.list_points.append(target_point)
self.current_slice += 1
if self.current_slice < len(self.list_slices):
point = [self.current_point.x, self.current_point.y, self.current_point.z]
point[self.orientation[self.secondary_subplot]-1] = self.list_slices[self.current_slice]
self.current_point = Coordinate(point)
self.windows[0].update_slice(self.list_slices[self.current_slice])
title_obj = self.windows[0].axes.set_title('Please select a new point on slice ' +
str(self.list_slices[self.current_slice]) + '/' +
str(self.image_dim[self.orientation[self.primary_subplot]-1] - 1) + ' (' +
str(self.current_slice + 1) + '/' +
str(len(self.list_slices)) + ')')
plt.setp(title_obj, color='k')
plot.draw()
self.windows[1].update_slice(point, data_update=False)
else:
for coord in self.list_points:
if self.list_points_useful_notation != '':
self.list_points_useful_notation += ':'
self.list_points_useful_notation = self.list_points_useful_notation + str(coord.x) + ',' + str(
coord.y) + ',' + str(coord.z) + ',' + str(coord.value)
self.all_processed = True
plt.close()
else:
                title_obj = self.windows[0].axes.set_title('The point you selected is not in the image. Please try again.')
plt.setp(title_obj, color='r')
plot.draw()
elif event.inaxes and plot.view == self.orientation[self.secondary_subplot]:
is_in_axes = False
for window in self.windows:
if event.inaxes == window.axes:
is_in_axes = True
if not is_in_axes:
return
self.last_update = time()
self.current_point = self.get_event_coordinates(event, plot)
point = [self.current_point.x, self.current_point.y, self.current_point.z]
for window in self.windows:
if window is plot:
window.update_slice(point, data_update=False)
else:
self.draw_points(window, self.current_point.x)
window.update_slice(point, data_update=True)
def draw_points(self, window, current_slice):
if window.view == self.orientation[self.primary_subplot]:
x_data, y_data = [], []
for pt in self.list_points:
if pt.x == current_slice:
x_data.append(pt.z + self.offset[2])
y_data.append(pt.y + self.offset[1])
self.plot_points.set_xdata(x_data)
self.plot_points.set_ydata(y_data)
def on_release(self, event, plot=None):
"""
This subplot refers to the secondary window. It captures event "release"
:param event:
:param plot:
:return:
"""
if event.button == 1 and event.inaxes and plot.view == self.orientation[self.secondary_subplot]:
point = [self.current_point.x, self.current_point.y, self.current_point.z]
point[self.orientation[self.primary_subplot]-1] = self.list_slices[self.current_slice]
for window in self.windows:
if window is plot:
window.update_slice(point, data_update=False)
else:
self.draw_points(window, self.current_point.y)
window.update_slice(point, data_update=True)
return
def on_motion(self, event, plot=None):
"""
This subplot refers to the secondary window. It captures event "motion"
:param event:
:param plot:
:return:
"""
if event.button == 1 and event.inaxes and plot.view == self.orientation[self.secondary_subplot] and time() - self.last_update > self.update_freq:
is_in_axes = False
for window in self.windows:
if event.inaxes == window.axes:
is_in_axes = True
if not is_in_axes:
return
self.last_update = time()
self.current_point = self.get_event_coordinates(event, plot)
point = [self.current_point.x, self.current_point.y, self.current_point.z]
for window in self.windows:
if window is plot:
window.update_slice(point, data_update=False)
else:
self.draw_points(window, self.current_point.x)
window.update_slice(point, data_update=True)
return
def get_results(self):
if self.list_points:
return self.list_points
else:
return None
def help(self, event):
if event.inaxes == self.ax_help:
webbrowser.open(self.help_url, new=0, autoraise=True)
def start(self):
super(ClickViewer, self).start()
if self.all_processed:
return self.list_points_useful_notation
else:
return None
def get_parser():
parser = Parser(__file__)
parser.usage.set_description('Volume Viewer')
parser.add_option(name="-i",
type_value=[[','], 'file'],
description="Images to display.",
mandatory=True,
example="anat.nii.gz")
parser.add_option(name='-mode',
type_value='multiple_choice',
description='Display mode.'
'\nviewer: standard three-window viewer.'
'\naxial: one-window viewer for manual centerline.\n',
mandatory=False,
default_value='viewer',
example=['viewer', 'axial'])
parser.add_option(name='-param',
type_value=[[':'], 'str'],
description='Parameters for visualization. '
'Separate images with \",\". Separate parameters with \":\".'
'\nid: number of image in the "-i" list'
'\ncmap: image colormap'
'\ninterp: image interpolation. Accepts: [\'nearest\' | \'bilinear\' | \'bicubic\' | \'spline16\' | '
'\'spline36\' | \'hanning\' | \'hamming\' | \'hermite\' | \'kaiser\' | '
'\'quadric\' | \'catrom\' | \'gaussian\' | \'bessel\' | \'mitchell\' | '
'\'sinc\' | \'lanczos\' | \'none\' |]'
'\nvmin:'
'\nvmax:'
'\nvmean:'
'\nperc: ',
mandatory=False,
example=['cmap=red:vmin=0:vmax=1', 'cmap=grey'])
parser.add_option(name="-v",
type_value="multiple_choice",
description="""Verbose. 0: nothing. 1: basic. 2: extended.""",
mandatory=False,
default_value='0',
example=['0', '1', '2'])
return parser
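# Example invocation (illustrative only; the image file name is assumed):
#   python sct_viewer.py -i t2.nii.gz -mode viewer
# Several images can be overlaid by passing a comma-separated list to -i.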
class ParamImageVisualization(object):
def __init__(self, id='0', mode='image', cmap='gray', interp='nearest', vmin='0', vmax='99', vmean='98', vmode='percentile', alpha='1.0'):
self.id = id
self.mode = mode
self.cmap = cmap
self.interp = interp
self.vmin = vmin
self.vmax = vmax
self.vmean = vmean
self.vmode = vmode
self.alpha = alpha
def update(self, params):
list_objects = params.split(',')
for obj in list_objects:
if len(obj) < 2:
sct.printv('Please check parameter -param (usage changed from previous version)', 1, type='error')
objs = obj.split('=')
setattr(self, objs[0], objs[1])
class ParamMultiImageVisualization(object):
"""
This class contains a dictionary with the params of multiple images visualization
"""
def __init__(self, list_param):
self.ids = []
self.images_parameters = dict()
for param_image in list_param:
if isinstance(param_image, ParamImageVisualization):
self.images_parameters[param_image.id] = param_image
else:
self.addImage(param_image)
def addImage(self, param_image):
param_im = ParamImageVisualization()
param_im.update(param_image)
if param_im.id != 0:
if param_im.id in self.images_parameters:
self.images_parameters[param_im.id].update(param_image)
else:
self.images_parameters[param_im.id] = param_im
else:
sct.printv("ERROR: parameters must contain 'id'", 1, 'error')
def prepare(list_images):
fname_images, orientation_images = [], []
for fname_im in list_images:
from sct_image import orientation
orientation_images.append(orientation(Image(fname_im), get=True, verbose=False))
path_fname, file_fname, ext_fname = sct.extract_fname(fname_im)
reoriented_image_filename = 'tmp.' + sct.add_suffix(file_fname + ext_fname, "_SAL")
sct.run('sct_image -i ' + fname_im + ' -o ' + reoriented_image_filename + ' -setorient SAL -v 0', verbose=False)
fname_images.append(reoriented_image_filename)
return fname_images, orientation_images
def clean():
sct.run('rm -rf ' + 'tmp.*', verbose=False)
#=======================================================================================================================
# Start program
#=======================================================================================================================
if __name__ == "__main__":
parser = get_parser()
arguments = parser.parse(sys.argv[1:])
fname_images, orientation_images = prepare(arguments["-i"])
list_images = [Image(fname) for fname in fname_images]
mode = arguments['-mode']
param_image1 = ParamImageVisualization()
visualization_parameters = ParamMultiImageVisualization([param_image1])
if "-param" in arguments:
param_images = arguments['-param']
# update registration parameters
for param in param_images:
visualization_parameters.addImage(param)
if mode == 'viewer':
# 3 views
viewer = ThreeViewer(list_images, visualization_parameters)
viewer.start()
elif mode == 'axial':
# only one axial view
viewer = ClickViewer(list_images, visualization_parameters)
viewer.start()
clean()
| mit |
ye-zhi/project-epsilon | code/utils/scripts/plot_mosaic.py | 4 | 2092 | """
"""
from __future__ import division, print_function
import sys, os, pdb
import numpy as np
import nibabel as nib
def plot_mosaic(img_data, transpose=False):
""" Return a mosaic plot for each slice of
the 3rd dimension of img_data
Parameters:
----------
img_data = 3D array
Returns:
-------
grid_2D : a 2D image with each slice of
the 3rd dimension of img_data plotted
in a mosaic
"""
n_slices = img_data.shape[2]
# Dimensions of the mosaic grid
n_rows = int(np.ceil(float(np.sqrt(n_slices))))
n_cols = int(np.ceil(float(n_slices)/float(n_rows)))
# Define the 2D mosaic
grid_2D = np.zeros((n_rows*img_data.shape[0], n_cols*img_data.shape[1]))
z = 0
for i in range(n_rows):
for j in range(n_cols):
if z < n_slices:
if transpose==True:
img_data_slice = img_data[:,::-1,z].T
else:
img_data_slice = img_data[:,::-1,z]
grid_2D[i*img_data.shape[0]:(i+1)*img_data.shape[0],\
j*img_data.shape[1]:(j+1)*img_data.shape[1]] = img_data_slice
z += 1
return grid_2D
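# Minimal usage sketch (added; the array below is synthetic, not project data):
#   import numpy as np
#   import matplotlib.pyplot as plt
#   vol = np.random.rand(64, 64, 30)            # any 3D array works
#   plt.imshow(plot_mosaic(vol), cmap='gray')   # slices tiled on one 2D grid
#   plt.show()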
if __name__=='__main__':
import matplotlib.pyplot as plt
plt.rcParams['image.cmap'] = 'gray'
plt.rcParams['image.interpolation'] = 'nearest'
project_path='../../../'
#img = nib.load(\
#'../../../data/ds005/sub001/BOLD/task001_run001/bold.nii.gz')
template = nib.load(project_path+\
'data/mni_icbm152_t1_tal_nlin_asym_09c_2mm.nii')
template_data_int = template.get_data()
template_data = template_data_int.astype(float)
img = nib.load(project_path+\
'data/ds005/sub001/model/model001/task001_run001.feat/' + \
'masked_filtered_func_data_mni.nii.gz')
img_data_int = img.get_data()
img_data = img_data_int.astype(float)
mean_data = np.mean(img_data, axis=-1)
plt.title('In brain voxels - mean values')
plt.imshow(plot_mosaic(template_data, transpose=False), cmap='gray', alpha=1)
plt.imshow(plot_mosaic(mean_data, transpose=False), cmap='gray', alpha=1)
plt.colorbar()
plt.show()
| bsd-3-clause |
britodasilva/pyhfo | pyhfo/core/book_reader.py | 1 | 4490 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
License: For personnal, educationnal, and research purpose, this software is
provided under the Gnu GPL (V.3) license. To use this software in
commercial application, please contact the authors.
Authors: Jaroslaw Zygierewicz ([email protected]),
Piotr J. Durka ([email protected]), Magdalena Zieleniewska ([email protected])
Date : September, 2014
'''
from __future__ import print_function, division
import numpy as np
import collections
import sys, os
from operator import itemgetter
import matplotlib.pyplot as py
#import pandas as pd
import struct
from scipy.signal import filtfilt, butter
from scipy.stats import scoreatpercentile
class BookImporter(object):
def __init__(self, book_file):
"""
Class for reading books from mp5 decomposition.
Input:
book_file -- string -- book file
"""
super(BookImporter, self).__init__()
f = open(book_file,'rb')
data, signals, atoms, epoch_s = self._read_book(f)
self.epoch_s = epoch_s
self.atoms = atoms
self.signals = signals
self.fs = data[5]['Fs']
self.ptspmV = data[5]['ptspmV']
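    # Usage sketch (added; the file name below is an assumed placeholder):
    #   book = BookImporter('decomposition.b')
    #   print(book.fs, book.epoch_s)   # sampling frequency and epoch length
    #   book.atoms                     # atoms per epoch, keyed by epoch number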
def _get_type(self, ident, f):
if ident == 1:
com_s = np.fromfile(f, '>u4', count=1)[0]
if not com_s==0: ## comment
return np.dtype([('comment', 'S'+str(com_s))])
else:
return None
elif ident == 2: ## header
head_s = np.fromfile(f, '>u4', count=1)[0]
return None
elif ident == 3: ## www address
www_s = np.fromfile(f, '>u1', count=1)[0]
return np.dtype([('www', 'S'+str(www_s))])
elif ident == 4: ## date
date_s = np.fromfile(f, '>u1', count=1)[0]
return np.dtype([('date', 'S'+str(date_s))])
elif ident == 5: ## signal info
#print 'here'
sig_info_s = np.fromfile(f, '>u1', count=1)[0]
return np.dtype([('Fs', '>f4'), ('ptspmV', '>f4'),
('chnl_cnt', '>u2')])
elif ident == 6: ## decomposition info
dec_info_s = np.fromfile(f, '>u1', count=1)[0]
return np.dtype([('percent', '>f4'), ('maxiterations', '>u4'),
('dict_size', '>u4'), ('dict_type', '>S1')])
elif ident == 10: #dirac
# return
atom_s = np.fromfile(f, '>u1', count=1)[0]
return np.dtype([('modulus', '>f4'), ('amplitude', '>f4'),
('t', '>f4')])
elif ident == 11: #gauss
atom_s = np.fromfile(f, '>u1', count=1)[0]
return np.dtype([('modulus', '>f4'), ('amplitude', '>f4'),
('t', '>f4'), ('scale', '>f4')])
elif ident == 12: #sinus
atom_s = np.fromfile(f, '>u1', count=1)[0]
return np.dtype([('modulus', '>f4'), ('amplitude', '>f4'),
('f', '>f4'), ('phase', '>f4')])
elif ident == 13: #gabor
atom_s = np.fromfile(f, '>u1', count=1)[0]
return np.dtype([('modulus', '>f4'), ('amplitude', '>f4'),
('t', '>f4'), ('scale', '>f4'),
('f', '>f4'), ('phase', '>f4')])
else:
return None
def _get_signal(self, f, epoch_nr, epoch_s):
sig_s = np.fromfile(f, '>u4', count=1)[0]
chnl_nr = np.fromfile(f, '>u2', count=1)[0]
signal = np.fromfile(f, '>f4', count= epoch_s)
return chnl_nr, signal
def _get_atoms(self, f):
atoms = list()
atoms_s = np.fromfile(f, '>u4', count=1)[0]
a_chnl_nr = np.fromfile(f, '>u2', count=1)[0]
ident = np.fromfile(f, '>u1', count=1)
while ident in [10, 11, 12, 13]:
atom = np.fromfile(f, self._get_type(ident[0], f), count=1)[0]
atoms.append({'params': atom, 'type': ident[0]})
ident = np.fromfile(f, '>u1', count=1)
f.seek(f.tell()-1)
return atoms, a_chnl_nr
def _read_book(self, f):
try:
f = open(f, 'rb')
except Exception:
f = f
version = np.fromfile(f, 'S6', count=1)
data = {}
ident = np.fromfile(f, 'u1', count=1)[0]
ct = self._get_type(ident, f)
signals = collections.defaultdict(list)
atoms = collections.defaultdict(list)
while ident:
if ct:
point = np.fromfile(f,ct, count=1)[0]
data[ident] = point
elif ident ==7:
data_s = np.fromfile(f, '>u4', count=1)[0]
epoch_nr = np.fromfile(f, '>u2', count=1)[0]
epoch_s = np.fromfile(f, '>u4', count=1)[0]
elif ident == 8:
chnl_nr, signal = self._get_signal(f, epoch_nr, epoch_s)
signals[epoch_nr].append(signal)
elif ident == 9:
pl = f.tell()
atom, a_chnl_nr = self._get_atoms(f)
atoms[epoch_nr] = atom
try:
ident = np.fromfile(f, '>u1', count=1)
if ident:
ident = ident[0]
ct = self._get_type(ident, f)
except Exception:
pass
return data, signals, atoms, epoch_s
| mit |
rosflight/firmware | scripts/plot_estimator_test.py | 1 | 1214 | import numpy as np
import matplotlib.pyplot as plt
stateType = np.dtype([
('t', np.float64),
('q', (np.float64,4)),
('qhat', (np.float64,4)),
('err', (np.float64,3)),
('eulerErr', (np.float64,3)),
('bias', (np.float32,3))
])
def plotResults(filename):
data = np.fromfile("../test/build/"+filename, dtype=stateType)
plt.figure(figsize=[12,9])
plt.suptitle(filename)
for i in range(4):
plt.subplot(4, 3, 3*i+1)
plt.plot(data['t'], data['q'][:,i], label="q")
plt.plot(data['t'], data['qhat'][:,i], label="qhat")
if i == 0:
plt.legend()
for i in range(3):
plt.subplot(4,3,3*i+2)
plt.plot(data['t'], data['err'][:,i])
plt.subplot(4,3,11)
err_norm = np.sqrt(np.sum(np.square(data['err']),axis=1))
plt.plot(data['t'], err_norm)
for i in range(3):
plt.subplot(4,3,3*i+3)
plt.plot(data['t'], data['bias'][:,i])
print("{} max error: {}".format(filename, np.max(err_norm)))
if __name__ == '__main__':
plotResults("linearGyro.bin")
plotResults("quadGyro.bin")
plotResults("expInt.bin")
plotResults("expQuadInt.bin")
plotResults("acc.bin")
plotResults("estState.bin")
plotResults("estBias.bin")
plotResults("estStateExtAtt.bin")
plotResults("movingExtAtt.bin")
plt.show()
| bsd-3-clause |
fqez/JdeRobot | src/drivers/MAVLinkServer/MAVProxy/modules/lib/live_graph.py | 8 | 2928 | #!/usr/bin/env python
"""
MAVProxy realtime graphing module, partly based on the wx graphing
demo by Eli Bendersky ([email protected])
http://eli.thegreenplace.net/files/prog_code/wx_mpl_dynamic_graph.py.txt
"""
from MAVProxy.modules.lib import mp_util
class LiveGraph():
'''
a live graph object using wx and matplotlib
All of the GUI work is done in a child process to provide some insulation
from the parent mavproxy instance and prevent instability in the GCS
New data is sent to the LiveGraph instance via a pipe
'''
def __init__(self,
fields,
title='MAVProxy: LiveGraph',
timespan=20.0,
tickresolution=0.2,
colors=[ 'red', 'green', 'blue', 'orange', 'olive', 'cyan', 'magenta', 'brown', 'dark green',
'violet', 'purple', 'grey', 'black']):
import multiprocessing
self.fields = fields
self.colors = colors
self.title = title
self.timespan = timespan
self.tickresolution = tickresolution
self.values = [None]*len(self.fields)
self.parent_pipe,self.child_pipe = multiprocessing.Pipe()
self.close_graph = multiprocessing.Event()
self.close_graph.clear()
self.child = multiprocessing.Process(target=self.child_task)
self.child.start()
def child_task(self):
'''child process - this holds all the GUI elements'''
mp_util.child_close_fds()
import matplotlib
import wx_processguard
from wx_loader import wx
from live_graph_ui import GraphFrame
matplotlib.use('WXAgg')
app = wx.App(False)
app.frame = GraphFrame(state=self)
app.frame.Show()
app.MainLoop()
def add_values(self, values):
'''add some data to the graph'''
if self.child.is_alive():
self.parent_pipe.send(values)
def close(self):
'''close the graph'''
self.close_graph.set()
if self.is_alive():
self.child.join(2)
def is_alive(self):
'''check if graph is still going'''
return self.child.is_alive()
if __name__ == "__main__":
# test the graph
import time, math
livegraph = LiveGraph(['sin(t)', 'cos(t)', 'sin(t+1)',
'cos(t+1)', 'sin(t+2)', 'cos(t+2)',
'cos(t+1)', 'sin(t+2)', 'cos(t+2)', 'x'],
timespan=30,
title='Graph Test')
while livegraph.is_alive():
t = time.time()
livegraph.add_values([math.sin(t), math.cos(t),
math.sin(t+1), math.cos(t+1),
math.sin(t+1), math.cos(t+1),
math.sin(t+1), math.cos(t+1),
math.sin(t+2), math.cos(t+2)])
time.sleep(0.05)
| gpl-3.0 |
fergalbyrne/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/numerix/__init__.py | 69 | 5473 | """
numerix imports either Numeric, numarray, or numpy based on various selectors.
0. If the value "--numpy","--numarray" or "--Numeric" is specified on the
command line, then numerix imports the specified
array package.
1. The value of numerix in matplotlibrc: either Numeric or numarray
2. If none of the above is done, the default array package is Numeric.
Because the matplotlibrc always provides *some* value for numerix
(it has its own system of default values), this default is most
likely never used.
To summarize: the commandline is examined first, the rc file second,
and the default array package is Numeric.
"""
import sys, os, struct
from matplotlib import rcParams, verbose
which = None, None
use_maskedarray = None
# First, see if --numarray or --Numeric was specified on the command
# line:
for a in sys.argv:
if a in ["--Numeric", "--numeric", "--NUMERIC",
"--Numarray", "--numarray", "--NUMARRAY",
"--NumPy", "--numpy", "--NUMPY", "--Numpy",
]:
which = a[2:], "command line"
if a == "--maskedarray":
use_maskedarray = True
if a == "--ma":
use_maskedarray = False
try: del a
except NameError: pass
if which[0] is None:
try: # In theory, rcParams always has *some* value for numerix.
which = rcParams['numerix'], "rc"
except KeyError:
pass
if use_maskedarray is None:
try:
use_maskedarray = rcParams['maskedarray']
except KeyError:
use_maskedarray = False
# If all the above fail, default to Numeric. Most likely not used.
if which[0] is None:
which = "numeric", "defaulted"
which = which[0].strip().lower(), which[1]
if which[0] not in ["numeric", "numarray", "numpy"]:
raise ValueError("numerix selector must be either 'Numeric', 'numarray', or 'numpy' but the value obtained from the %s was '%s'." % (which[1], which[0]))
if which[0] == "numarray":
import warnings
warnings.warn("numarray use as a numerix backed for matplotlib is deprecated",
DeprecationWarning, stacklevel=1)
#from na_imports import *
from numarray import *
from _na_imports import nx, inf, infinity, Infinity, Matrix, isnan, all
from numarray.numeric import nonzero
from numarray.convolve import cross_correlate, convolve
import numarray
version = 'numarray %s'%numarray.__version__
nan = struct.unpack('d', struct.pack('Q', 0x7ff8000000000000))[0]
elif which[0] == "numeric":
import warnings
warnings.warn("Numeric use as a numerix backed for matplotlib is deprecated",
DeprecationWarning, stacklevel=1)
#from nc_imports import *
from Numeric import *
from _nc_imports import nx, inf, infinity, Infinity, isnan, all, any
from Matrix import Matrix
import Numeric
version = 'Numeric %s'%Numeric.__version__
nan = struct.unpack('d', struct.pack('Q', 0x7ff8000000000000))[0]
elif which[0] == "numpy":
try:
import numpy.oldnumeric as numpy
from numpy.oldnumeric import *
except ImportError:
import numpy
from numpy import *
print 'except asarray', asarray
from _sp_imports import nx, infinity, rand, randn, isnan, all, any
from _sp_imports import UInt8, UInt16, UInt32, Infinity
try:
from numpy.oldnumeric.matrix import Matrix
except ImportError:
Matrix = matrix
version = 'numpy %s' % numpy.__version__
from numpy import nan
else:
raise RuntimeError("invalid numerix selector")
# Some changes are only applicable to the new numpy:
if (which[0] == 'numarray' or
which[0] == 'numeric'):
from mlab import amin, amax
newaxis = NewAxis
def typecode(a):
return a.typecode()
def iscontiguous(a):
return a.iscontiguous()
def byteswapped(a):
return a.byteswapped()
def itemsize(a):
return a.itemsize()
def angle(a):
return arctan2(a.imag, a.real)
else:
# We've already checked for a valid numerix selector,
# so assume numpy.
from mlab import amin, amax
newaxis = NewAxis
from numpy import angle
def typecode(a):
return a.dtype.char
def iscontiguous(a):
return a.flags.contiguous
def byteswapped(a):
return a.byteswap()
def itemsize(a):
return a.itemsize
verbose.report('numerix %s'%version)
# a bug fix for blas numeric suggested by Fernando Perez
matrixmultiply=dot
asum = sum
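# Illustrative note (added for documentation): together with the wrapper
# functions defined above, these aliases give calling code one interface no
# matter which array package was selected, e.g.
#
#     import matplotlib.numerix as nx
#     a = nx.array([1.0, 2.0, 3.0])
#     nx.matrixmultiply(a, a)    # dot product -> 14.0
#     nx.asum(a)                 # -> 6.0
#     nx.typecode(a)             # 'd' for a double-precision array
#
# The exact typecode character may differ between backends.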
def _import_fail_message(module, version):
"""Prints a message when the array package specific version of an extension
fails to import correctly.
"""
_dict = { "which" : which[0],
"module" : module,
"specific" : version + module
}
print """
The import of the %(which)s version of the %(module)s module,
%(specific)s, failed. This is either because %(which)s was
unavailable when matplotlib was compiled, because a dependency of
%(specific)s could not be satisfied, or because the build flag for
this module was turned off in setup.py. If it appears that
%(specific)s was not built, make sure you have a working copy of
%(which)s and then re-install matplotlib. Otherwise, the following
traceback gives more details:\n""" % _dict
g = globals()
l = locals()
__import__('ma', g, l)
__import__('fft', g, l)
__import__('linear_algebra', g, l)
__import__('random_array', g, l)
__import__('mlab', g, l)
la = linear_algebra
ra = random_array
| agpl-3.0 |
shahankhatch/scikit-learn | sklearn/tests/test_lda.py | 7 | 6707 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import make_blobs
from sklearn import lda
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
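# Quick illustration (not an actual test): the toy data above is linearly
# separable, so a default LDA fit reproduces ``y`` exactly,
#
#     >>> lda.LDA().fit(X, y).predict(X)
#     array([1, 1, 1, 2, 2, 2])
#
# The tests below check the same behaviour for every (solver, shrinkage)
# combination listed in ``solver_shrinkage``.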
def test_lda_predict():
# Test LDA classification.
# This checks that LDA implements fit and predict and returns correct values
# for simple toy data.
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = lda.LDA(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1,
8, 'solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
# Test invalid shrinkages
clf = lda.LDA(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = lda.LDA(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = lda.LDA(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = lda.LDA(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
def test_lda_coefs():
# Test if the coefficients of the solvers are approximately the same.
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = lda.LDA(solver="svd")
clf_lda_lsqr = lda.LDA(solver="lsqr")
clf_lda_eigen = lda.LDA(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_explained_variance_ratio():
    # Test that the explained variance ratios (normalized eigenvalues) sum to 1
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_eigen = lda.LDA(solver="eigen")
clf_lda_eigen.fit(X, y)
assert_almost_equal(clf_lda_eigen.explained_variance_ratio_.sum(), 1.0, 3)
def test_lda_transform():
# Test LDA transform.
clf = lda.LDA(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = lda.LDA(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = lda.LDA(solver="lsqr", n_components=1)
clf.fit(X, y)
msg = "transform not implemented for 'lsqr'"
assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_orthogonality():
# arrange four classes with their means in a kite-shaped pattern
# the longer distance should be transformed to the first component, and
# the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = lda.LDA(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1 ** 2))
d2 /= np.sqrt(np.sum(d2 ** 2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
# Test if classification works correctly with differently scaled features.
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = lda.LDA(solver=solver)
# should be able to separate the data perfectly
assert_equal(clf.fit(x, y).score(x, y), 1.0,
'using covariance: %s' % solver)
def test_covariance():
x, y = make_blobs(n_samples=100, n_features=5,
centers=1, random_state=42)
# make features correlated
x = np.dot(x, np.arange(x.shape[1] ** 2).reshape(x.shape[1], x.shape[1]))
c_e = lda._cov(x, 'empirical')
assert_almost_equal(c_e, c_e.T)
c_s = lda._cov(x, 'auto')
assert_almost_equal(c_s, c_s.T)
| bsd-3-clause |
yeahkun/tushare | tushare/stock/billboard.py | 19 | 12058 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Top list ("Dragon-Tiger" board) data
Created on 2015-06-10
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
import pandas as pd
from pandas.compat import StringIO
from tushare.stock import cons as ct
import numpy as np
import time
import re
import lxml.html
from lxml import etree
from tushare.util import dateu as du
from tushare.stock import ref_vars as rv
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
def top_list(date = None, retry_count=3, pause=0.001):
"""
    Fetch the daily top list (Dragon-Tiger board)
    Parameters
    --------
    date:string
                date of the detailed data, format: YYYY-MM-DD; if empty, the most recent trading day is returned
    retry_count : int, default 3
                number of retries when network problems occur
    pause : int, default 0
                seconds to pause between repeated requests, to avoid problems caused by requesting too frequently
    Return
    ------
    DataFrame
        code: stock code
        name: stock name
        pchange: price change (%)
        amount: top-list turnover (10k CNY)
        buy: buy amount (10k CNY)
        bratio: share of total turnover
        sell: sell amount (10k CNY)
        sratio: share of total turnover
        reason: reason for being listed
        date: date
"""
if date is None:
if du.get_hour() < 18:
date = du.last_tddate()
else:
date = du.today()
else:
if(du.is_holiday(date)):
return None
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(rv.LHB_URL%(ct.P_TYPE['http'], ct.DOMAINS['em'], date))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@id=\"dt_1\"]")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
df = pd.read_html(sarr)[0]
df.columns = [i for i in range(1,12)]
df = df.apply(_f_rows, axis=1)
df = df.fillna(method='ffill')
df = df.drop([1, 4], axis=1)
df.columns = rv.LHB_COLS
df = df.drop_duplicates()
df['code'] = df['code'].astype(int)
df['code'] = df['code'].map(lambda x: str(x).zfill(6))
df['date'] = date
except:
pass
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def cap_tops(days= 5, retry_count= 3, pause= 0.001):
"""
    Fetch per-stock top-list statistics
    Parameters
    --------
    days:int
                number of days; counts how often a stock made the top list over the last n days, default 5, other valid values are 10, 30 and 60
    retry_count : int, default 3
                number of retries when network problems occur
    pause : int, default 0
                seconds to pause between repeated requests, to avoid problems caused by requesting too frequently
    Return
    ------
    DataFrame
        code: stock code
        name: stock name
        count: number of times on the top list
        bamount: cumulative buy amount (10k CNY)
        samount: cumulative sell amount (10k CNY)
        net: net amount (10k CNY)
        bcount: number of buying seats
        scount: number of selling seats
"""
if ct._check_lhb_input(days) is True:
ct._write_head()
df = _cap_tops(days, pageNo=1, retry_count=retry_count,
pause=pause)
df['code'] = df['code'].map(lambda x: str(x).zfill(6))
if df is not None:
df = df.drop_duplicates('code')
return df
def _cap_tops(last=5, pageNo=1, retry_count=3, pause=0.001, dataArr=pd.DataFrame()):
ct._write_console()
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(rv.LHB_SINA_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'], rv.LHB_KINDS[0],
ct.PAGES['fd'], last, pageNo))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@id=\"dataTable\"]/tr")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0]
df.columns = rv.LHB_GGTJ_COLS
dataArr = dataArr.append(df, ignore_index=True)
nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
if len(nextPage)>0:
pageNo = re.findall(r'\d+', nextPage[0])[0]
return _cap_tops(last, pageNo, retry_count, pause, dataArr)
else:
return dataArr
except Exception as e:
print(e)
def broker_tops(days= 5, retry_count= 3, pause= 0.001):
"""
    Fetch brokerage-branch top-list statistics
    Parameters
    --------
    days:int
                number of days; counts appearances on the top list over the last n days, default 5, other valid values are 10, 30 and 60
    retry_count : int, default 3
                number of retries when network problems occur
    pause : int, default 0
                seconds to pause between repeated requests, to avoid problems caused by requesting too frequently
    Return
    ---------
    broker: brokerage branch name
    count: number of times on the top list
    bamount: cumulative buy amount (10k CNY)
    bcount: number of buying seats
    samount: cumulative sell amount (10k CNY)
    scount: number of selling seats
    top3: top three stocks bought
"""
if ct._check_lhb_input(days) is True:
ct._write_head()
df = _broker_tops(days, pageNo=1, retry_count=retry_count,
pause=pause)
return df
def _broker_tops(last=5, pageNo=1, retry_count=3, pause=0.001, dataArr=pd.DataFrame()):
ct._write_console()
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(rv.LHB_SINA_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'], rv.LHB_KINDS[1],
ct.PAGES['fd'], last, pageNo))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@id=\"dataTable\"]/tr")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0]
df.columns = rv.LHB_YYTJ_COLS
dataArr = dataArr.append(df, ignore_index=True)
nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
if len(nextPage)>0:
pageNo = re.findall(r'\d+', nextPage[0])[0]
return _broker_tops(last, pageNo, retry_count, pause, dataArr)
else:
return dataArr
except Exception as e:
print(e)
def inst_tops(days= 5, retry_count= 3, pause= 0.001):
"""
    Fetch institutional-seat tracking statistics
    Parameters
    --------
    days:int
                number of days; counts appearances on the top list over the last n days, default 5, other valid values are 10, 30 and 60
    retry_count : int, default 3
                number of retries when network problems occur
    pause : int, default 0
                seconds to pause between repeated requests, to avoid problems caused by requesting too frequently
    Return
    --------
    code: stock code
    name: stock name
    bamount: cumulative buy amount (10k CNY)
    bcount: number of buys
    samount: cumulative sell amount (10k CNY)
    scount: number of sells
    net: net amount (10k CNY)
"""
if ct._check_lhb_input(days) is True:
ct._write_head()
df = _inst_tops(days, pageNo=1, retry_count=retry_count,
pause=pause)
df['code'] = df['code'].map(lambda x: str(x).zfill(6))
return df
def _inst_tops(last=5, pageNo=1, retry_count=3, pause=0.001, dataArr=pd.DataFrame()):
ct._write_console()
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(rv.LHB_SINA_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'], rv.LHB_KINDS[2],
ct.PAGES['fd'], last, pageNo))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@id=\"dataTable\"]/tr")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0]
df = df.drop([2,3], axis=1)
df.columns = rv.LHB_JGZZ_COLS
dataArr = dataArr.append(df, ignore_index=True)
nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
if len(nextPage)>0:
pageNo = re.findall(r'\d+', nextPage[0])[0]
return _inst_tops(last, pageNo, retry_count, pause, dataArr)
else:
return dataArr
except Exception as e:
print(e)
def inst_detail(retry_count= 3, pause= 0.001):
"""
    Fetch institutional-seat transaction details for the most recent trading day
    Parameters
    --------
    retry_count : int, default 3
                number of retries when network problems occur
    pause : int, default 0
                seconds to pause between repeated requests, to avoid problems caused by requesting too frequently
    Return
    ----------
    code: stock code
    name: stock name
    date: trading date
    bamount: institutional-seat buy amount (10k CNY)
    samount: institutional-seat sell amount (10k CNY)
    type: type
"""
ct._write_head()
df = _inst_detail(pageNo=1, retry_count=retry_count,
pause=pause)
if len(df)>0:
df['code'] = df['code'].map(lambda x: str(x).zfill(6))
return df
def _inst_detail(pageNo=1, retry_count=3, pause=0.001, dataArr=pd.DataFrame()):
ct._write_console()
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(rv.LHB_SINA_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'], rv.LHB_KINDS[3],
ct.PAGES['fd'], '', pageNo))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@id=\"dataTable\"]/tr")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0]
df.columns = rv.LHB_JGMX_COLS
dataArr = dataArr.append(df, ignore_index=True)
nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
if len(nextPage)>0:
pageNo = re.findall(r'\d+', nextPage[0])[0]
return _inst_detail(pageNo, retry_count, pause, dataArr)
else:
return dataArr
except Exception as e:
print(e)
def _f_rows(x):
if '%' in x[3]:
x[11] = x[6]
for i in range(6, 11):
x[i] = x[i-5]
for i in range(1, 6):
x[i] = np.NaN
return x
if __name__ == "__main__":
print(top_list('2015-06-17'))
# print(inst_detail())
| bsd-3-clause |
ContinuumIO/blaze | blaze/compatibility.py | 3 | 3880 | from __future__ import absolute_import, division, print_function
import sys
from types import MethodType
import pandas.util.testing as tm
from toolz import identity
PY3 = sys.version_info[0] == 3
PY2 = sys.version_info[0] == 2
if PY3:
import builtins
def apply(f, args, **kwargs):
return f(*args, **kwargs)
else:
import __builtin__ as builtins
apply = builtins.apply
try:
import cPickle as pickle
except ImportError:
import pickle
# Portions of this taken from the six library, licensed as follows.
#
# Copyright (c) 2010-2013 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from toolz.compatibility import map, zip, range, reduce
if PY2:
_inttypes = (int, long)
unicode = builtins.unicode
basestring = builtins.basestring
_strtypes = (str, unicode)
def boundmethod(func, instance):
return MethodType(func, instance, type(instance))
from itertools import izip_longest as zip_longest
from contextlib2 import ExitStack
else:
_inttypes = (int,)
_strtypes = (str,)
unicode = str
basestring = str
boundmethod = MethodType
from itertools import zip_longest
from contextlib import ExitStack
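# Illustrative helper (added for documentation; the name below is ours and
# is not used elsewhere in blaze).  It shows the kind of version-agnostic
# type check the aliases above are meant to support.
def _is_text(value):
    """Return True if `value` is a string type on the running Python."""
    return isinstance(value, _strtypes)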
import io
try:
SEEK_END = io.SEEK_END
except AttributeError:
SEEK_END = 2
try:
import pytest
skipif = pytest.mark.skipif
xfail = pytest.mark.xfail
min_python_version = skipif(sys.version_info < (2, 7),
reason="Python >= 2.7 required")
raises = pytest.raises
except ImportError:
# TODO: move the above into a separate testing utils module
pass
if sys.version_info >= (2, 7):
from ctypes import c_ssize_t
else:
import ctypes
if ctypes.sizeof(ctypes.c_void_p) == 4:
c_ssize_t = ctypes.c_int32
else:
c_ssize_t = ctypes.c_int64
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
def assert_series_equal(left, right, check_names=True, **kwargs):
"""Backwards compatibility wrapper for
``pandas.util.testing.assert_series_equal``
Examples
--------
>>> import pandas as pd
>>> s = pd.Series(list('abc'), name='a')
>>> s2 = pd.Series(list('abc'), name='b')
>>> assert_series_equal(s, s2) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
AssertionError: ...
>>> assert_series_equal(s, s2, check_names=False)
See Also
--------
pandas.util.testing.assert_series_equal
"""
try:
return tm.assert_series_equal(left, right, check_names=check_names,
**kwargs)
except TypeError:
if check_names:
assert left.name == right.name
return tm.assert_series_equal(left, right, **kwargs)
if PY2:
def u8(cs):
return cs.decode('utf-8')
else:
u8 = identity
| bsd-3-clause |
hainm/scikit-learn | examples/svm/plot_svm_margin.py | 318 | 2328 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in our data's
distribution, and will only consider points close to the line
of separation.
A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# figure number
fignum = 1
# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):
clf = svm.SVC(kernel='linear', C=penalty)
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
    yy_down = yy - np.sqrt(1 + a ** 2) * margin
    yy_up = yy + np.sqrt(1 + a ** 2) * margin
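    # Worked note (added for clarity): the margin hyperplanes satisfy
    # w.x + b = +/-1, i.e. they sit at perpendicular distance 1/||w||
    # (= `margin`) from the separating line.  For a line of slope a, that
    # perpendicular distance corresponds to a vertical offset of
    # sqrt(1 + a**2) * margin, which is what is plotted below.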
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -4.8
x_max = 4.2
y_min = -6
y_max = 6
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
YetAnotherTomek/egfrd | samples/irreversible/plot.py | 3 | 2672 | #!/usr/bin/python
#
# Make sure that the egfrd system is added to your PYTHONPATH
# This means, in bash for example:
# $ export PYTHONPATH=$HOME/egfrd
#
# python plot.py irr.-2.out 0.000000125 irr.-1.out 0.00000125 irr.0.out 0.0000125 irr.1.out 0.000125 irr.2.out 0.00125 irr.3.out 0.0125
# irr.-3.out 0.0000000125
import sys
import numpy
import scipy.io
from matplotlib.pylab import *
import _gfrd
from p_irr import p_irr
N_A = 6.0221367e23
N = 1000
sigma = 5e-9
r0 = sigma
D_tot = 2e-12
kf = 100 * sigma * D_tot
#kf = 0
tau = sigma*sigma / D_tot
rmin = sigma
def load_data(filename):
infile = open(filename)
data = array([float(x) for x in infile.read().split()], numpy.float)
infile.close()
return data
def plot_sol(t):
rmax = 3.1 * math.sqrt(6 * D_tot * t) + rmin
logrmin = math.log(rmin)
logrmax = math.log(rmax)
tick=(logrmax-logrmin)/N
loggrid = numpy.mgrid[logrmin:logrmax:tick]
grid = numpy.exp(loggrid)
parray = array([p_irr(r, t, r0, kf, D_tot, sigma) for r in grid])
return loglog(grid / sigma , parray * sigma, 'k-')[0]
#plot(rarray / sigma , parray, 'k-', label='theory')
def plot_hist(data, T, i):
bins = 30
nonreactions = numpy.compress(data >= sigma, data)
print 'max', max(nonreactions)
hist, r = numpy.histogram(numpy.log(nonreactions),
bins=bins)
r = r[:-1]
histsum = hist.sum()
S_sim = float(len(nonreactions)) / len(data)
print 'S_sim', S_sim
hist = hist.astype(numpy.float)
r = numpy.concatenate([r, [r[-1] - r[-2]]])
r = numpy.exp(r)
xticks = r[1:]-r[:-1]
hist /= len(data) * xticks
r = r[:-1] + (xticks * .5)
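    # Note (added for clarity): dividing the counts by len(data) * bin width
    # turns the histogram into an estimate of the probability density p(r),
    # and `r` is shifted to the bin centres.  The caller then plots
    # hist * sigma against r / sigma, i.e. the density in units of sigma.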
#print 'x', x
#pStyles = ['o', '^', 'v', '<', '>', 's', '+']
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
loglog(r / sigma, hist * sigma, colors[i] + 'o',
label=r'$T = \tau^{%d}$' % round(math.log10(T/tau)))
if __name__ == '__main__':
axes([.15,.15,.8,.8])
for i in range(len(sys.argv[1:])/2):
filename = sys.argv[i*2+1]
T = float(sys.argv[i*2+2])
print filename,T
data = load_data(filename)
plot_hist(data, T, i)
solline = plot_sol(T)
xlabel(r'$r / \sigma$', size=28)
ylabel(r'$p_{irr}$', size=28)
xlim(0.9, 2.2e2)
ylim(2e-6, 2e1)
xticks([1, 10, 100], ['1', '10', '100'], size=22)
yticks(size=18)
#solline.set_label(r'theory')
#legend(handlelen=0.02, pad=0.02,handletextsep=0.01, labelsep=0.001)
#grid()
show()
#>>> _gfrd.S_irr(.0001 * 1e-8**2/1e-12, 1e-8, 10 * 1e-8 * 1e-12, 1e-12, 1e-8)
#0.99116163945434221
| gpl-2.0 |
procoder317/scikit-learn | examples/applications/wikipedia_principal_eigenvector.py | 233 | 7819 | """
===============================
Wikipedia principal eigenvector
===============================
A classical way to assert the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
http://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
http://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in the scikit.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
from sklearn.externals.joblib import Memory
from sklearn.externals.six.moves.urllib.request import urlopen
from sklearn.externals.six import iteritems
print(__doc__)
###############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
(redirects_url, redirects_filename),
(page_links_url, page_links_filename),
]
for url, filename in resources:
if not os.path.exists(filename):
print("Downloading data from '%s', please wait..." % url)
opener = urlopen(url)
open(filename, 'wb').write(opener.read())
print()
###############################################################################
# Loading the redirect files
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
"""Find the index of an article name after redirect resolution"""
k = redirects.get(k, k)
return index_map.setdefault(k, len(index_map))
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)
def short_name(nt_uri):
"""Remove the < and > URI markers and the common URI prefix"""
return nt_uri[SHORTNAME_SLICE]
def get_redirects(redirects_filename):
"""Parse the redirections and build a transitively closed map out of it"""
redirects = {}
print("Parsing the NT redirect file")
for l, line in enumerate(BZ2File(redirects_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
redirects[short_name(split[0])] = short_name(split[2])
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
# compute the transitive closure
print("Computing the transitive closure of the redirect relation")
for l, source in enumerate(redirects.keys()):
transitive_target = None
target = redirects[source]
seen = set([source])
while True:
transitive_target = target
target = redirects.get(target)
if target is None or target in seen:
break
seen.add(target)
redirects[source] = transitive_target
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
return redirects
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
"""Extract the adjacency graph as a scipy sparse matrix
Redirects are resolved first.
Returns X, the scipy sparse adjacency matrix, redirects as python
dict from article names to article names and index_map a python dict
from article names to python int (article indexes).
"""
print("Computing the redirect map")
redirects = get_redirects(redirects_filename)
print("Computing the integer index map")
index_map = dict()
links = list()
for l, line in enumerate(BZ2File(page_links_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
i = index(redirects, index_map, short_name(split[0]))
j = index(redirects, index_map, short_name(split[2]))
links.append((i, j))
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
if limit is not None and l >= limit - 1:
break
print("Computing the adjacency matrix")
X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
for i, j in links:
X[i, j] = 1.0
del links
print("Converting to CSR representation")
X = X.tocsr()
print("CSR conversion done")
return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
redirects_filename, page_links_filename, limit=5000000)
names = dict((i, name) for name, i in iteritems(index_map))
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the wikipedia pages with the strongest components of the
# principal singular vectors, which should be similar to the highest eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
"""Power iteration computation of the principal eigenvector
This method is also known as Google PageRank and the implementation
is based on the one from the NetworkX project (BSD licensed too)
with copyrights by:
Aric Hagberg <[email protected]>
Dan Schult <[email protected]>
Pieter Swart <[email protected]>
"""
n = X.shape[0]
X = X.copy()
incoming_counts = np.asarray(X.sum(axis=1)).ravel()
print("Normalizing the graph")
for i in incoming_counts.nonzero()[0]:
X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i]
dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n, 0)).ravel()
scores = np.ones(n, dtype=np.float32) / n # initial guess
for i in range(max_iter):
print("power iteration #%d" % i)
prev_scores = scores
scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
+ (1 - alpha) * prev_scores.sum() / n)
# check convergence: normalized l_inf norm
scores_max = np.abs(scores).max()
if scores_max == 0.0:
scores_max = 1.0
err = np.abs(scores - prev_scores).max() / scores_max
print("error: %0.6f" % err)
if err < n * tol:
return scores
return scores
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
| bsd-3-clause |
courtarro/gnuradio-wg-grc | gr-digital/examples/example_fll.py | 49 | 5715 | #!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, digital, filter
from gnuradio import blocks
from gnuradio import channels
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_fll(gr.top_block):
def __init__(self, N, sps, rolloff, ntaps, bw, noise, foffset, toffset, poffset):
gr.top_block.__init__(self)
rrc_taps = filter.firdes.root_raised_cosine(
sps, sps, 1.0, rolloff, ntaps)
data = 2.0*scipy.random.randint(0, 2, N) - 1.0
data = scipy.exp(1j*poffset) * data
self.src = blocks.vector_source_c(data.tolist(), False)
self.rrc = filter.interp_fir_filter_ccf(sps, rrc_taps)
self.chn = channels.channel_model(noise, foffset, toffset)
self.fll = digital.fll_band_edge_cc(sps, rolloff, ntaps, bw)
self.vsnk_src = blocks.vector_sink_c()
self.vsnk_fll = blocks.vector_sink_c()
self.vsnk_frq = blocks.vector_sink_f()
self.vsnk_phs = blocks.vector_sink_f()
self.vsnk_err = blocks.vector_sink_f()
self.connect(self.src, self.rrc, self.chn, self.fll, self.vsnk_fll)
self.connect(self.rrc, self.vsnk_src)
self.connect((self.fll,1), self.vsnk_frq)
self.connect((self.fll,2), self.vsnk_phs)
self.connect((self.fll,3), self.vsnk_err)
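        # Flowgraph summary (added for clarity): random +/-1 symbols ->
        # RRC interpolating filter (sps samples per symbol) -> channel model
        # (noise, frequency and timing offset) -> band-edge FLL.  The FLL's
        # extra outputs (frequency, phase, error) are captured in vector
        # sinks so main() can plot them alongside the post-RRC reference
        # signal tapped off into vsnk_src.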
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=2000,
help="Set the number of samples to process [default=%default]")
parser.add_option("-S", "--sps", type="int", default=4,
help="Set the samples per symbol [default=%default]")
parser.add_option("-r", "--rolloff", type="eng_float", default=0.35,
help="Set the rolloff factor [default=%default]")
parser.add_option("-W", "--bandwidth", type="eng_float", default=2*scipy.pi/100.0,
help="Set the loop bandwidth [default=%default]")
parser.add_option("-n", "--ntaps", type="int", default=45,
help="Set the number of taps in the filters [default=%default]")
parser.add_option("", "--noise", type="eng_float", default=0.0,
help="Set the simulation noise voltage [default=%default]")
parser.add_option("-f", "--foffset", type="eng_float", default=0.2,
help="Set the simulation's normalized frequency offset (in Hz) [default=%default]")
parser.add_option("-t", "--toffset", type="eng_float", default=1.0,
help="Set the simulation's timing offset [default=%default]")
parser.add_option("-p", "--poffset", type="eng_float", default=0.0,
help="Set the simulation's phase offset [default=%default]")
(options, args) = parser.parse_args ()
# Adjust N for the interpolation by sps
options.nsamples = options.nsamples // options.sps
# Set up the program-under-test
put = example_fll(options.nsamples, options.sps, options.rolloff,
options.ntaps, options.bandwidth, options.noise,
options.foffset, options.toffset, options.poffset)
put.run()
data_src = scipy.array(put.vsnk_src.data())
data_err = scipy.array(put.vsnk_err.data())
    # Convert the FLL's LO frequency from radians/sample to normalized Hz
data_frq = scipy.array(put.vsnk_frq.data()) / (2.0*scipy.pi)
    # Adjust this to align with the data: there are two filters of ntaps
    # length each, and the channel introduces another 4 samples of delay.
data_fll = scipy.array(put.vsnk_fll.data()[2*options.ntaps-4:])
# Plot the FLL's LO frequency
f1 = pylab.figure(1, figsize=(12,10))
s1 = f1.add_subplot(2,2,1)
s1.plot(data_frq)
s1.set_title("FLL LO")
s1.set_xlabel("Samples")
s1.set_ylabel("Frequency (normalized Hz)")
# Plot the FLL's error
s2 = f1.add_subplot(2,2,2)
s2.plot(data_err)
s2.set_title("FLL Error")
s2.set_xlabel("Samples")
s2.set_ylabel("FLL Loop error")
# Plot the IQ symbols
s3 = f1.add_subplot(2,2,3)
s3.plot(data_src.real, data_src.imag, "o")
s3.plot(data_fll.real, data_fll.imag, "rx")
s3.set_title("IQ")
s3.set_xlabel("Real part")
s3.set_ylabel("Imag part")
# Plot the symbols in time
s4 = f1.add_subplot(2,2,4)
s4.plot(data_src.real, "o-")
s4.plot(data_fll.real, "rx-")
s4.set_title("Symbols")
s4.set_xlabel("Samples")
s4.set_ylabel("Real Part of Signals")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
mtb-za/fatiando | cookbook/seismic_wavefd_rayleigh_wave.py | 9 | 2865 | """
Seismic: 2D finite difference simulation of elastic P and SV wave propagation
in a medium with a discontinuity (i.e., Moho), generating Rayleigh waves
"""
import numpy as np
from matplotlib import animation
from fatiando import gridder
from fatiando.seismic import wavefd
from fatiando.vis import mpl
# Set the parameters of the finite difference grid
shape = (150, 900)
area = [0, 540000, 0, 90000]
# Make a density and wave velocity model
density = 2400 * np.ones(shape)
svel = 3700 * np.ones(shape)
pvel = 6600 * np.ones(shape)
moho = 50
density[moho:] = 2800
svel[moho:] = 4300
pvel[moho:] = 7500
mu = wavefd.lame_mu(svel, density)
lamb = wavefd.lame_lamb(pvel, svel, density)
# Make a wave source from a mexican hat wavelet for the x and z directions
sources = [
[wavefd.MexHatSource(10000, 10000, area, shape, 100000, 0.5, delay=2)],
[wavefd.MexHatSource(10000, 10000, area, shape, 100000, 0.5, delay=2)]]
# Get the iterator. This part only generates an iterator object. The actual
# computations take place at each iteration in the for loop below
dt = wavefd.maxdt(area, shape, pvel.max())
duration = 130
maxit = int(duration / dt)
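# Note (added for clarity): wavefd.maxdt returns the largest time step that
# keeps the finite-difference scheme stable for this grid spacing and the
# fastest wave speed in the model (the P-wave velocity), so the number of
# iterations is just the simulated duration divided by that step.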
stations = [[400000, 0]]
snapshots = int(1. / dt)
simulation = wavefd.elastic_psv(lamb, mu, density, area, dt, maxit, sources,
stations, snapshots, padding=70, taper=0.005,
xz2ps=True)
# This part makes an animation using matplotlibs animation API
background = 10 ** -5 * ((density - density.min()) / density.max())
fig = mpl.figure(figsize=(10, 8))
mpl.subplots_adjust(right=0.98, left=0.11, hspace=0.3, top=0.93)
mpl.subplot(3, 1, 1)
mpl.title('x seismogram')
xseismogram, = mpl.plot([], [], '-k')
mpl.xlim(0, duration)
mpl.ylim(-0.05, 0.05)
mpl.ylabel('Amplitude')
mpl.subplot(3, 1, 2)
mpl.title('z seismogram')
zseismogram, = mpl.plot([], [], '-k')
mpl.xlim(0, duration)
mpl.ylim(-0.05, 0.05)
mpl.ylabel('Amplitude')
ax = mpl.subplot(3, 1, 3)
mpl.title('time: 0.0 s')
wavefield = mpl.imshow(background, extent=area, cmap=mpl.cm.gray_r,
vmin=-0.00001, vmax=0.00001)
mpl.points(stations, '^b', size=8)
mpl.text(500000, 20000, 'Crust')
mpl.text(500000, 60000, 'Mantle')
fig.text(0.7, 0.31, 'Seismometer')
mpl.xlim(area[:2])
mpl.ylim(area[2:][::-1])
mpl.xlabel('x (km)')
mpl.ylabel('z (km)')
mpl.m2km()
times = np.linspace(0, dt * maxit, maxit)
# This function updates the plot every few timesteps
def animate(i):
t, p, s, xcomp, zcomp = simulation.next()
mpl.title('time: %0.1f s' % (times[t]))
wavefield.set_array((background + p + s)[::-1])
xseismogram.set_data(times[:t + 1], xcomp[0][:t + 1])
zseismogram.set_data(times[:t + 1], zcomp[0][:t + 1])
return wavefield, xseismogram, zseismogram
anim = animation.FuncAnimation(
fig, animate, frames=maxit / snapshots, interval=1)
mpl.show()
| bsd-3-clause |
fabianp/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 230 | 19795 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
# Misc tests on linkage
rng = np.random.RandomState(42)
X = rng.normal(size=(5, 5))
assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
assert_raises(ValueError, linkage_tree, X, linkage='foo')
assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
# Smoke test FeatureAgglomeration
FeatureAgglomeration().fit(X)
    # test hierarchical clustering on a precomputed distances matrix
dis = cosine_distances(X)
res = linkage_tree(dis, affinity="precomputed")
assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
    # test hierarchical clustering with a callable affinity (manhattan_distances)
res = linkage_tree(X, affinity=manhattan_distances)
assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
# Check that we obtain the correct solution for structured linkage trees.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for tree_builder in _TREE_BUILDERS.values():
children, n_components, n_leaves, parent = \
tree_builder(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError,
tree_builder, X.T, np.ones((4, 4)))
# Check that fitting with no samples raises an error
assert_raises(ValueError,
tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
# Check that we obtain the correct solution for unstructured linkage trees.
rng = np.random.RandomState(0)
X = rng.randn(50, 100)
for this_X in (X, X[0]):
# With specified a number of clusters just for the sake of
# raising a warning and testing the warning code
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, ward_tree, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
for tree_builder in _TREE_BUILDERS.values():
for this_X in (X, X[0]):
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, tree_builder, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
# Check that the height of the results of linkage tree is sorted.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for linkage_func in _TREE_BUILDERS.values():
children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering():
# Check that we obtain the correct number of clusters with
# agglomerative clustering.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
for linkage in ("ward", "complete", "average"):
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage=linkage)
clustering.fit(X)
# test caching
try:
tempdir = mkdtemp()
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity,
memory=tempdir,
linkage=linkage)
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
finally:
shutil.rmtree(tempdir)
# Turn caching off now
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage=linkage)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
labels), 1)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
# Check that we raise a TypeError on dense matrices
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.toarray()[:10, :10]),
linkage=linkage)
assert_raises(ValueError, clustering.fit, X)
# Test that using ward with another metric than euclidean raises an
# exception
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity.toarray(),
affinity="manhattan",
linkage="ward")
assert_raises(ValueError, clustering.fit, X)
# Test using another metric than euclidean works with linkage complete
for affinity in PAIRED_DISTANCES.keys():
# Compare our (structured) implementation to scipy
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=np.ones((n_samples, n_samples)),
affinity=affinity,
linkage="complete")
clustering.fit(X)
clustering2 = AgglomerativeClustering(
n_clusters=10,
connectivity=None,
affinity=affinity,
linkage="complete")
clustering2.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
clustering.labels_),
1)
# Test that using a distance matrix (affinity = 'precomputed') has same
# results (with connectivity constraints)
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage="complete")
clustering.fit(X)
X_dist = pairwise_distances(X)
clustering2 = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
affinity='precomputed',
linkage="complete")
clustering2.fit(X_dist)
assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
# Check that we obtain the correct solution in a simplistic case
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
agglo.fit(X)
assert_true(np.size(np.unique(agglo.labels_)) == 5)
X_red = agglo.transform(X)
assert_true(X_red.shape[1] == 5)
X_full = agglo.inverse_transform(X_red)
assert_true(np.unique(X_full[0]).size == 5)
assert_array_almost_equal(agglo.transform(X_full), X_red)
# Check that fitting with no samples raises a ValueError
assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
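    # Note (added for clarity): comparing the n x n co-clustering matrices
    # (entry (i, j) is 1 when samples i and j share a cluster) makes the
    # check invariant to how the two cuts happen to number their labels.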
def test_scikit_vs_scipy():
# Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
n, p, k = 10, 5, 3
rng = np.random.RandomState(0)
# Not using a lil_matrix here, just to check that non sparse
# matrices are well handled
connectivity = np.ones((n, n))
for linkage in _TREE_BUILDERS.keys():
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method=linkage)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
# Check that connectivity in the ward tree is propagated correctly during
# merging.
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144)])
connectivity = kneighbors_graph(X, 10, include_self=False)
ward = AgglomerativeClustering(
n_clusters=4, connectivity=connectivity, linkage='ward')
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_ward_tree_children_order():
# Check that children are ordered in the same way for both structured and
# unstructured versions of ward_tree.
# test on five random datasets
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X)
out_structured = ward_tree(X, connectivity=connectivity)
assert_array_equal(out_unstructured[0], out_structured[0])
def test_ward_linkage_tree_return_distance():
# Test return_distance option on linkage and ward trees
# test that return_distance when set true, gives same
# output on both structured and unstructured clustering.
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X, return_distance=True)
out_structured = ward_tree(X, connectivity=connectivity,
return_distance=True)
# get children
children_unstructured = out_unstructured[0]
children_structured = out_structured[0]
# check if we got the same clusters
assert_array_equal(children_unstructured, children_structured)
# check if the distances are the same
dist_unstructured = out_unstructured[-1]
dist_structured = out_structured[-1]
assert_array_almost_equal(dist_unstructured, dist_structured)
for linkage in ['average', 'complete']:
structured_items = linkage_tree(
X, connectivity=connectivity, linkage=linkage,
return_distance=True)[-1]
unstructured_items = linkage_tree(
X, linkage=linkage, return_distance=True)[-1]
structured_dist = structured_items[-1]
unstructured_dist = unstructured_items[-1]
structured_children = structured_items[0]
unstructured_children = unstructured_items[0]
assert_array_almost_equal(structured_dist, unstructured_dist)
assert_array_almost_equal(
structured_children, unstructured_children)
# test on the following dataset where we know the truth
# taken from scipy/cluster/tests/hierarchy_test_data.py
X = np.array([[1.43054825, -7.5693489],
[6.95887839, 6.82293382],
[2.87137846, -9.68248579],
[7.87974764, -6.05485803],
[8.24018364, -6.09495602],
[7.39020262, 8.54004355]])
# truth
linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 9.10208346, 4.],
[7., 9., 24.7784379, 6.]])
linkage_X_complete = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.96742194, 4.],
[7., 9., 18.77445997, 6.]])
linkage_X_average = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.55832839, 4.],
[7., 9., 15.44089605, 6.]])
n_samples, n_features = np.shape(X)
connectivity_X = np.ones((n_samples, n_samples))
out_X_unstructured = ward_tree(X, return_distance=True)
out_X_structured = ward_tree(X, connectivity=connectivity_X,
return_distance=True)
# check that the labels are the same
assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
linkage_options = ['complete', 'average']
X_linkage_truth = [linkage_X_complete, linkage_X_average]
for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
out_X_unstructured = linkage_tree(
X, return_distance=True, linkage=linkage)
out_X_structured = linkage_tree(
X, connectivity=connectivity_X, linkage=linkage,
return_distance=True)
# check that the labels are the same
assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
assert_array_equal(X_truth[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
    # Non-regression check for a bug triggered when a connectivity matrix that
    # does not support item assignment is provided with more than one connected
    # component.
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = AgglomerativeClustering(connectivity=c, linkage='ward')
assert_warns(UserWarning, w.fit, x)
def test_int_float_dict():
rng = np.random.RandomState(0)
keys = np.unique(rng.randint(100, size=10).astype(np.intp))
values = rng.rand(len(keys))
d = IntFloatDict(keys, values)
for key, value in zip(keys, values):
assert d[key] == value
other_keys = np.arange(50).astype(np.intp)[::2]
other_values = 0.5 * np.ones(50)[::2]
other = IntFloatDict(other_keys, other_values)
# Complete smoke test
max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(
connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False))
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_connectivity_ignores_diagonal():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
connectivity_include_self = kneighbors_graph(X, 3, include_self=True)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(connectivity=connectivity_include_self)
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_compute_full_tree():
# Test that the full tree is computed if n_clusters is small
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
connectivity = kneighbors_graph(X, 5, include_self=False)
# When n_clusters is less, the full tree should be built
# that is the number of merges should be n_samples - 1
agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - 1)
    # When n_clusters is large (greater than max(100, 0.02 * n_samples)),
    # tree building should stop early, once n_clusters clusters remain.
n_clusters = 101
X = rng.randn(200, 2)
connectivity = kneighbors_graph(X, 10, include_self=False)
agc = AgglomerativeClustering(n_clusters=n_clusters,
connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - n_clusters)
def test_n_components():
# Test n_components returned by linkage, average and ward tree
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Connectivity matrix having five components.
connectivity = np.eye(5)
for linkage_func in _TREE_BUILDERS.values():
assert_equal(ignore_warnings(linkage_func)(X, connectivity)[1], 5)
def test_agg_n_clusters():
# Test that an error is raised when n_clusters <= 0
rng = np.random.RandomState(0)
X = rng.rand(20, 10)
for n_clus in [-1, 0]:
agc = AgglomerativeClustering(n_clusters=n_clus)
msg = ("n_clusters should be an integer greater than 0."
" %s was provided." % str(agc.n_clusters))
assert_raise_message(ValueError, msg, agc.fit, X)
| bsd-3-clause |
gojomo/gensim | gensim/sklearn_api/d2vmodel.py | 1 | 9435 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Scikit learn interface for :class:`~gensim.models.doc2vec.Doc2Vec`.
Follows scikit-learn API conventions to facilitate using gensim along with scikit-learn.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import common_texts
>>> from gensim.sklearn_api import D2VTransformer
>>>
>>> model = D2VTransformer(min_count=1, vector_size=5)
>>> docvecs = model.fit_transform(common_texts) # represent `common_texts` as vectors
"""
import numpy as np
from six import string_types
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.exceptions import NotFittedError
from gensim import models
from gensim.models import doc2vec
class D2VTransformer(TransformerMixin, BaseEstimator):
"""Base Doc2Vec module, wraps :class:`~gensim.models.doc2vec.Doc2Vec`.
This model based on `Quoc Le, Tomas Mikolov: "Distributed Representations of Sentences and Documents"
<https://cs.stanford.edu/~quocle/paragraph_vector.pdf>`_.
"""
def __init__(self, dm_mean=None, dm=1, dbow_words=0, dm_concat=0, dm_tag_count=1, dv=None,
dv_mapfile=None, comment=None, trim_rule=None, vector_size=100, alpha=0.025, window=5,
min_count=5, max_vocab_size=None, sample=1e-3, seed=1, workers=3, min_alpha=0.0001,
hs=0, negative=5, cbow_mean=1,
hashfxn=hash, epochs=5, sorted_vocab=1, batch_words=10000):
"""
Parameters
----------
dm_mean : int {1,0}, optional
If 0, use the sum of the context word vectors. If 1, use the mean. Only applies when `dm_concat=0`.
dm : int {1,0}, optional
Defines the training algorithm. If `dm=1` - distributed memory (PV-DM) is used.
Otherwise, distributed bag of words (PV-DBOW) is employed.
dbow_words : int {1,0}, optional
If set to 1 - trains word-vectors (in skip-gram fashion) simultaneous with DBOW
doc-vector training, If 0, only trains doc-vectors (faster).
dm_concat : int {1,0}, optional
If 1, use concatenation of context vectors rather than sum/average.
Note concatenation results in a much-larger model, as the input is no longer the size of one
(sampled or arithmetically combined) word vector, but the size of the tag(s) and all words
in the context strung together.
dm_tag_count : int, optional
Expected constant number of document tags per document, when using dm_concat mode.
dv : :class:`~gensim.models.keyedvectors.KeyedVectors`
A mapping from a string or int tag to its vector representation.
dv_mapfile : str, optional
Path to a file containing the docvecs mapping. If `dv` is None, this file will be used to create it.
comment : str, optional
A model descriptive comment, used for logging and debugging purposes.
trim_rule : function ((str, int, int) -> int), optional
Vocabulary trimming rule that accepts (word, count, min_count).
Specifies whether certain words should remain in the vocabulary (:attr:`gensim.utils.RULE_KEEP`),
be trimmed away (:attr:`gensim.utils.RULE_DISCARD`), or handled using the default
(:attr:`gensim.utils.RULE_DEFAULT`).
If None, then :func:`gensim.utils.keep_vocab_item` will be used.
vector_size : int, optional
Dimensionality of the feature vectors.
alpha : float, optional
The initial learning rate.
window : int, optional
The maximum distance between the current and predicted word within a sentence.
min_count : int, optional
Ignores all words with total frequency lower than this.
max_vocab_size : int, optional
Limits the RAM during vocabulary building; if there are more unique
words than this, then prune the infrequent ones. Every 10 million word types need about 1GB of RAM.
Set to `None` for no limit.
sample : float, optional
The threshold for configuring which higher-frequency words are randomly downsampled,
useful range is (0, 1e-5).
seed : int, optional
Seed for the random number generator. Initial vectors for each word are seeded with a hash of
the concatenation of word + `str(seed)`.
Note that for a **fully deterministically-reproducible run**, you **must also limit the model to
a single worker thread (`workers=1`)**, to eliminate ordering jitter from OS thread scheduling.
In Python 3, reproducibility between interpreter launches also requires use of the `PYTHONHASHSEED`
environment variable to control hash randomization.
workers : int, optional
Use this many worker threads to train the model. Will yield a speedup when training with multicore machines.
min_alpha : float, optional
Learning rate will linearly drop to `min_alpha` as training progresses.
hs : int {1,0}, optional
If 1, hierarchical softmax will be used for model training. If set to 0, and `negative` is non-zero,
negative sampling will be used.
negative : int, optional
If > 0, negative sampling will be used, the int for negative specifies how many "noise words"
should be drawn (usually between 5-20). If set to 0, no negative sampling is used.
cbow_mean : int, optional
Same as `dm_mean`, **unused**.
hashfxn : function (object -> int), optional
A hashing function. Used to create an initial random reproducible vector by hashing the random seed.
epochs : int, optional
Number of epochs to iterate through the corpus.
sorted_vocab : bool, optional
Whether the vocabulary should be sorted internally.
batch_words : int, optional
Number of words to be handled by each job.
"""
self.gensim_model = None
self.dm_mean = dm_mean
self.dm = dm
self.dbow_words = dbow_words
self.dm_concat = dm_concat
self.dm_tag_count = dm_tag_count
self.dv = dv
self.dv_mapfile = dv_mapfile
self.comment = comment
self.trim_rule = trim_rule
# attributes associated with gensim.models.Word2Vec
self.vector_size = vector_size
self.alpha = alpha
self.window = window
self.min_count = min_count
self.max_vocab_size = max_vocab_size
self.sample = sample
self.seed = seed
self.workers = workers
self.min_alpha = min_alpha
self.hs = hs
self.negative = negative
self.cbow_mean = int(cbow_mean)
self.hashfxn = hashfxn
self.epochs = epochs
self.sorted_vocab = sorted_vocab
self.batch_words = batch_words
def fit(self, X, y=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {iterable of :class:`~gensim.models.doc2vec.TaggedDocument`, iterable of list of str}
A collection of tagged documents used for training the model.
Returns
-------
:class:`~gensim.sklearn_api.d2vmodel.D2VTransformer`
The trained model.
"""
if isinstance([i for i in X[:1]][0], doc2vec.TaggedDocument):
d2v_sentences = X
else:
d2v_sentences = [doc2vec.TaggedDocument(words, [i]) for i, words in enumerate(X)]
self.gensim_model = models.Doc2Vec(
documents=d2v_sentences, dm_mean=self.dm_mean, dm=self.dm,
dbow_words=self.dbow_words, dm_concat=self.dm_concat, dm_tag_count=self.dm_tag_count,
dv=self.dv, dv_mapfile=self.dv_mapfile, comment=self.comment,
trim_rule=self.trim_rule, vector_size=self.vector_size, alpha=self.alpha, window=self.window,
min_count=self.min_count, max_vocab_size=self.max_vocab_size, sample=self.sample,
seed=self.seed, workers=self.workers, min_alpha=self.min_alpha, hs=self.hs,
negative=self.negative, cbow_mean=self.cbow_mean, hashfxn=self.hashfxn,
epochs=self.epochs, sorted_vocab=self.sorted_vocab, batch_words=self.batch_words
)
return self
def transform(self, docs):
"""Infer the vector representations for the input documents.
Parameters
----------
docs : {iterable of list of str, list of str}
Input document or sequence of documents.
Returns
-------
numpy.ndarray of shape [`len(docs)`, `size`]
The vector representation of the `docs`.
"""
if self.gensim_model is None:
raise NotFittedError(
"This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
)
        # If a single document (a list of str) was passed, wrap it in a list
if isinstance(docs[0], string_types):
docs = [docs]
vectors = [self.gensim_model.infer_vector(doc) for doc in docs]
return np.reshape(np.array(vectors), (len(docs), self.gensim_model.vector_size))
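# A minimal, illustrative sketch of using `transform` on unseen documents.
# It assumes gensim's bundled `common_texts` toy corpus; the exact vectors
# produced depend on the (non-deterministic) training run.
#
#     from gensim.test.utils import common_texts
#     model = D2VTransformer(min_count=1, vector_size=5).fit(common_texts)
#     new_vecs = model.transform([["human", "interface", "computer"]])
#     # new_vecs has shape (1, 5): one row per input document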
| lgpl-2.1 |
tatsuy/ardupilot | Tools/LogAnalyzer/tests/TestOptFlow.py | 26 | 14969 | from LogAnalyzer import Test,TestResult
import DataflashLog
from math import sqrt
import numpy as np
import matplotlib.pyplot as plt
class TestFlow(Test):
'''test optical flow sensor scale factor calibration'''
#
    # Use the following procedure to log the calibration data. It is assumed that the optical flow sensor has been
    # correctly aligned, is focused, and that the test is performed over a textured surface with adequate lighting.
    # Note that the strobing effect from non-incandescent artificial lighting can produce poor optical flow measurements.
#
# 1) Set LOG_DISARMED and FLOW_ENABLE to 1 and verify that ATT and OF messages are being logged onboard
# 2) Place on level ground, apply power and wait for EKF to complete attitude alignment
# 3) Keeping the copter level, lift it to shoulder height and rock between +-20 and +-30 degrees
# in roll about an axis that passes through the flow sensor lens assembly. The time taken to rotate from
# maximum left roll to maximum right roll should be about 1 second.
# 4) Repeat 3) about the pitch axis
# 5) Holding the copter level, lower it to the ground and remove power
# 6) Transfer the logfile from the sdcard.
# 7) Open a terminal and cd to the ardupilot/Tools/LogAnalyzer directory
    # 8) Run the analysis by entering 'python LogAnalyzer.py <log file name including full path>'
# 9) Check the OpticalFlow test status printed to the screen. The analysis plots are saved to
# flow_calibration.pdf and the recommended scale factors to flow_calibration.param
def __init__(self):
Test.__init__(self)
self.name = "OpticalFlow"
def run(self, logdata, verbose):
self.result = TestResult()
self.result.status = TestResult.StatusType.GOOD
def FAIL():
self.result.status = TestResult.StatusType.FAIL
def WARN():
if self.result.status != TestResult.StatusType.FAIL:
self.result.status = TestResult.StatusType.WARN
try:
# tuning parameters used by the algorithm
tilt_threshold = 15 # roll and pitch threshold used to start and stop calibration (deg)
quality_threshold = 124 # minimum flow quality required for data to be used by the curve fit (N/A)
min_rate_threshold = 0.0 # if the gyro rate is less than this, the data will not be used by the curve fit (rad/sec)
            max_rate_threshold = 2.0 # if the gyro rate is greater than this, the data will not be used by the curve fit (rad/sec)
param_std_threshold = 5.0 # maximum allowable 1-std uncertainty in scaling parameter (scale factor * 1000)
param_abs_threshold = 200 # max/min allowable scale factor parameter. Values of FLOW_FXSCALER and FLOW_FYSCALER outside the range of +-param_abs_threshold indicate a sensor configuration problem.
            min_num_points = 100 # minimum number of points required for a curve fit - this is a necessary, but not sufficient, condition - the standard deviation estimate of the fit gradient is also important.
# get the existing scale parameters
flow_fxscaler = logdata.parameters["FLOW_FXSCALER"]
flow_fyscaler = logdata.parameters["FLOW_FYSCALER"]
# load required optical flow data
if "OF" in logdata.channels:
flowX = np.zeros(len(logdata.channels["OF"]["flowX"].listData))
for i in range(len(logdata.channels["OF"]["flowX"].listData)):
(line, flowX[i]) = logdata.channels["OF"]["flowX"].listData[i]
bodyX = np.zeros(len(logdata.channels["OF"]["bodyX"].listData))
for i in range(len(logdata.channels["OF"]["bodyX"].listData)):
(line, bodyX[i]) = logdata.channels["OF"]["bodyX"].listData[i]
flowY = np.zeros(len(logdata.channels["OF"]["flowY"].listData))
for i in range(len(logdata.channels["OF"]["flowY"].listData)):
(line, flowY[i]) = logdata.channels["OF"]["flowY"].listData[i]
bodyY = np.zeros(len(logdata.channels["OF"]["bodyY"].listData))
for i in range(len(logdata.channels["OF"]["bodyY"].listData)):
(line, bodyY[i]) = logdata.channels["OF"]["bodyY"].listData[i]
flow_time_us = np.zeros(len(logdata.channels["OF"]["TimeUS"].listData))
for i in range(len(logdata.channels["OF"]["TimeUS"].listData)):
(line, flow_time_us[i]) = logdata.channels["OF"]["TimeUS"].listData[i]
flow_qual = np.zeros(len(logdata.channels["OF"]["Qual"].listData))
for i in range(len(logdata.channels["OF"]["Qual"].listData)):
(line, flow_qual[i]) = logdata.channels["OF"]["Qual"].listData[i]
else:
FAIL()
self.result.statusMessage = "FAIL: no optical flow data\n"
return
# load required attitude data
if "ATT" in logdata.channels:
Roll = np.zeros(len(logdata.channels["ATT"]["Roll"].listData))
for i in range(len(logdata.channels["ATT"]["Roll"].listData)):
(line, Roll[i]) = logdata.channels["ATT"]["Roll"].listData[i]
Pitch = np.zeros(len(logdata.channels["ATT"]["Pitch"].listData))
for i in range(len(logdata.channels["ATT"]["Pitch"].listData)):
(line, Pitch[i]) = logdata.channels["ATT"]["Pitch"].listData[i]
att_time_us = np.zeros(len(logdata.channels["ATT"]["TimeUS"].listData))
for i in range(len(logdata.channels["ATT"]["TimeUS"].listData)):
(line, att_time_us[i]) = logdata.channels["ATT"]["TimeUS"].listData[i]
else:
FAIL()
self.result.statusMessage = "FAIL: no attitude data\n"
return
# calculate the start time for the roll calibration
startTime = int(0)
startRollIndex = int(0)
for i in range(len(Roll)):
if abs(Roll[i]) > tilt_threshold:
startTime = att_time_us[i]
break
for i in range(len(flow_time_us)):
if flow_time_us[i] > startTime:
startRollIndex = i
break
# calculate the end time for the roll calibration
endTime = int(0)
endRollIndex = int(0)
for i in range(len(Roll)-1,-1,-1):
if abs(Roll[i]) > tilt_threshold:
endTime = att_time_us[i]
break
for i in range(len(flow_time_us)-1,-1,-1):
if flow_time_us[i] < endTime:
endRollIndex = i
break
# check we have enough roll data points
if (endRollIndex - startRollIndex <= min_num_points):
FAIL()
self.result.statusMessage = "FAIL: insufficient roll data pointsa\n"
return
# resample roll test data excluding data before first movement and after last movement
            # also exclude data where there is insufficient or too much angular rate
flowX_resampled = []
bodyX_resampled = []
flowX_time_us_resampled = []
for i in range(len(Roll)):
if (i >= startRollIndex) and (i <= endRollIndex) and (abs(bodyX[i]) > min_rate_threshold) and (abs(bodyX[i]) < max_rate_threshold) and (flow_qual[i] > quality_threshold):
flowX_resampled.append(flowX[i])
bodyX_resampled.append(bodyX[i])
flowX_time_us_resampled.append(flow_time_us[i])
# calculate the start time for the pitch calibration
startTime = 0
startPitchIndex = int(0)
for i in range(len(Pitch)):
if abs(Pitch[i]) > tilt_threshold:
startTime = att_time_us[i]
break
for i in range(len(flow_time_us)):
if flow_time_us[i] > startTime:
startPitchIndex = i
break
# calculate the end time for the pitch calibration
endTime = 0
endPitchIndex = int(0)
for i in range(len(Pitch)-1,-1,-1):
if abs(Pitch[i]) > tilt_threshold:
endTime = att_time_us[i]
break
for i in range(len(flow_time_us)-1,-1,-1):
if flow_time_us[i] < endTime:
endPitchIndex = i
break
# check we have enough pitch data points
if (endPitchIndex - startPitchIndex <= min_num_points):
FAIL()
self.result.statusMessage = "FAIL: insufficient pitch data pointsa\n"
return
# resample pitch test data excluding data before first movement and after last movement
# also exclude data where there is insufficient or too much angular rate
flowY_resampled = []
bodyY_resampled = []
flowY_time_us_resampled = []
for i in range(len(Roll)):
if (i >= startPitchIndex) and (i <= endPitchIndex) and (abs(bodyY[i]) > min_rate_threshold) and (abs(bodyY[i]) < max_rate_threshold) and (flow_qual[i] > quality_threshold):
flowY_resampled.append(flowY[i])
bodyY_resampled.append(bodyY[i])
flowY_time_us_resampled.append(flow_time_us[i])
# fit a straight line to the flow vs body rate data and calculate the scale factor parameter required to achieve a slope of 1
coef_flow_x , cov_x = np.polyfit(bodyX_resampled,flowX_resampled,1,rcond=None, full=False, w=None, cov=True)
coef_flow_y , cov_y = np.polyfit(bodyY_resampled,flowY_resampled,1,rcond=None, full=False, w=None, cov=True)
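            # cov_x[0][0] and cov_y[0][0] are the variances of the fitted slopes,
            # so their square roots give the 1-std uncertainties checked below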
            # taking the existing scale factor parameters into account, calculate the parameter values required to achieve a unity slope
flow_fxscaler_new = int(1000 * (((1 + 0.001 * float(flow_fxscaler))/coef_flow_x[0] - 1)))
flow_fyscaler_new = int(1000 * (((1 + 0.001 * float(flow_fyscaler))/coef_flow_y[0] - 1)))
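            # e.g. with an existing FLOW_FXSCALER of 0 and a fitted slope of 0.8,
            # the new value is int(1000 * (1.0/0.8 - 1)) = 250 (illustrative numbers only)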
# Do a sanity check on the scale factor variance
if sqrt(cov_x[0][0]) > param_std_threshold or sqrt(cov_y[0][0]) > param_std_threshold:
FAIL()
self.result.statusMessage = "FAIL: inaccurate fit - poor quality or insufficient data\nFLOW_FXSCALER 1STD = %u\nFLOW_FYSCALER 1STD = %u\n" % (round(1000*sqrt(cov_x[0][0])),round(1000*sqrt(cov_y[0][0])))
# Do a sanity check on the scale factors
if abs(flow_fxscaler_new) > param_abs_threshold or abs(flow_fyscaler_new) > param_abs_threshold:
FAIL()
self.result.statusMessage = "FAIL: required scale factors are excessive\nFLOW_FXSCALER=%i\nFLOW_FYSCALER=%i\n" % (flow_fxscaler,flow_fyscaler)
# display recommended scale factors
self.result.statusMessage = "Set FLOW_FXSCALER to %i\nSet FLOW_FYSCALER to %i\n\nCal plots saved to flow_calibration.pdf\nCal parameters saved to flow_calibration.param\n\nFLOW_FXSCALER 1STD = %u\nFLOW_FYSCALER 1STD = %u\n" % (flow_fxscaler_new,flow_fyscaler_new,round(1000*sqrt(cov_x[0][0])),round(1000*sqrt(cov_y[0][0])))
# calculate fit display data
body_rate_display = [-max_rate_threshold,max_rate_threshold]
fit_coef_x = np.poly1d(coef_flow_x)
flowX_display = fit_coef_x(body_rate_display)
fit_coef_y = np.poly1d(coef_flow_y)
flowY_display = fit_coef_y(body_rate_display)
# plot and save calibration test points to PDF
from matplotlib.backends.backend_pdf import PdfPages
output_plot_filename = "flow_calibration.pdf"
pp = PdfPages(output_plot_filename)
plt.figure(1,figsize=(20,13))
plt.subplot(2,1,1)
plt.plot(bodyX_resampled,flowX_resampled,'b', linestyle=' ', marker='o',label="test points")
plt.plot(body_rate_display,flowX_display,'r',linewidth=2.5,label="linear fit")
plt.title('X axis flow rate vs gyro rate')
plt.ylabel('flow rate (rad/s)')
plt.xlabel('gyro rate (rad/sec)')
plt.grid()
plt.legend(loc='upper left')
# draw plots
plt.subplot(2,1,2)
plt.plot(bodyY_resampled,flowY_resampled,'b', linestyle=' ', marker='o',label="test points")
plt.plot(body_rate_display,flowY_display,'r',linewidth=2.5,label="linear fit")
plt.title('Y axis flow rate vs gyro rate')
plt.ylabel('flow rate (rad/s)')
plt.xlabel('gyro rate (rad/sec)')
plt.grid()
plt.legend(loc='upper left')
pp.savefig()
plt.figure(2,figsize=(20,13))
plt.subplot(2,1,1)
plt.plot(flow_time_us,flowX,'b',label="flow rate - all")
plt.plot(flow_time_us,bodyX,'r',label="gyro rate - all")
plt.plot(flowX_time_us_resampled,flowX_resampled,'c', linestyle=' ', marker='o',label="flow rate - used")
plt.plot(flowX_time_us_resampled,bodyX_resampled,'m', linestyle=' ', marker='o',label="gyro rate - used")
plt.title('X axis flow and body rate vs time')
plt.ylabel('rate (rad/s)')
plt.xlabel('time (usec)')
plt.grid()
plt.legend(loc='upper left')
# draw plots
plt.subplot(2,1,2)
plt.plot(flow_time_us,flowY,'b',label="flow rate - all")
plt.plot(flow_time_us,bodyY,'r',label="gyro rate - all")
plt.plot(flowY_time_us_resampled,flowY_resampled,'c', linestyle=' ', marker='o',label="flow rate - used")
plt.plot(flowY_time_us_resampled,bodyY_resampled,'m', linestyle=' ', marker='o',label="gyro rate - used")
plt.title('Y axis flow and body rate vs time')
plt.ylabel('rate (rad/s)')
plt.xlabel('time (usec)')
plt.grid()
plt.legend(loc='upper left')
pp.savefig()
# close the pdf file
pp.close()
# close all figures
plt.close("all")
# write correction parameters to file
test_results_filename = "flow_calibration.param"
file = open(test_results_filename,"w")
file.write("FLOW_FXSCALER"+" "+str(flow_fxscaler_new)+"\n")
file.write("FLOW_FYSCALER"+" "+str(flow_fyscaler_new)+"\n")
file.close()
except KeyError as e:
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = str(e) + ' not found'
| gpl-3.0 |
pprett/scikit-learn | sklearn/cluster/hierarchical.py | 2 | 33540 | """Hierarchical Agglomerative Clustering
These routines perform some hierarchical agglomerative clustering of some
input data.
Authors : Vincent Michel, Bertrand Thirion, Alexandre Gramfort,
Gael Varoquaux
License: BSD 3 clause
"""
from heapq import heapify, heappop, heappush, heappushpop
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClusterMixin
from ..externals.joblib import Memory
from ..externals import six
from ..metrics.pairwise import paired_distances, pairwise_distances
from ..utils import check_array
from ..utils.sparsetools import connected_components
from . import _hierarchical
from ._feature_agglomeration import AgglomerationTransform
from ..utils.fast_dict import IntFloatDict
from ..externals.six.moves import xrange
###############################################################################
# For non fully-connected graphs
def _fix_connectivity(X, connectivity, n_components=None,
affinity="euclidean"):
"""
Fixes the connectivity matrix
- copies it
- makes it symmetric
- converts it to LIL if necessary
- completes it if necessary
"""
n_samples = X.shape[0]
if (connectivity.shape[0] != n_samples or
connectivity.shape[1] != n_samples):
raise ValueError('Wrong shape for connectivity matrix: %s '
'when X is %s' % (connectivity.shape, X.shape))
# Make the connectivity matrix symmetric:
connectivity = connectivity + connectivity.T
# Convert connectivity matrix to LIL
if not sparse.isspmatrix_lil(connectivity):
if not sparse.isspmatrix(connectivity):
connectivity = sparse.lil_matrix(connectivity)
else:
connectivity = connectivity.tolil()
# Compute the number of nodes
n_components, labels = connected_components(connectivity)
if n_components > 1:
warnings.warn("the number of connected components of the "
"connectivity matrix is %d > 1. Completing it to avoid "
"stopping the tree early." % n_components,
stacklevel=2)
# XXX: Can we do without completing the matrix?
for i in xrange(n_components):
idx_i = np.where(labels == i)[0]
Xi = X[idx_i]
for j in xrange(i):
idx_j = np.where(labels == j)[0]
Xj = X[idx_j]
D = pairwise_distances(Xi, Xj, metric=affinity)
ii, jj = np.where(D == np.min(D))
ii = ii[0]
jj = jj[0]
connectivity[idx_i[ii], idx_j[jj]] = True
connectivity[idx_j[jj], idx_i[ii]] = True
return connectivity, n_components
###############################################################################
# Hierarchical tree building functions
def ward_tree(X, connectivity=None, n_clusters=None, return_distance=False):
"""Ward clustering based on a Feature matrix.
Recursively merges the pair of clusters that minimally increases
within-cluster variance.
The inertia matrix uses a Heapq-based representation.
This is the structured version, that takes into account some topological
structure between samples.
Read more in the :ref:`User Guide <hierarchical_clustering>`.
Parameters
----------
X : array, shape (n_samples, n_features)
feature matrix representing n_samples samples to be clustered
connectivity : sparse matrix (optional).
connectivity matrix. Defines for each sample the neighboring samples
following a given structure of the data. The matrix is assumed to
be symmetric and only the upper triangular half is used.
Default is None, i.e, the Ward algorithm is unstructured.
n_clusters : int (optional)
Stop early the construction of the tree at n_clusters. This is
useful to decrease computation time if the number of clusters is
not small compared to the number of samples. In this case, the
complete tree is not computed, thus the 'children' output is of
limited use, and the 'parents' output should rather be used.
This option is valid only when specifying a connectivity matrix.
return_distance : bool (optional)
If True, return the distance between the clusters.
Returns
-------
children : 2D array, shape (n_nodes-1, 2)
The children of each non-leaf node. Values less than `n_samples`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_samples` is a non-leaf
node and has children `children_[i - n_samples]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_samples + i`
n_components : int
The number of connected components in the graph.
n_leaves : int
The number of leaves in the tree
parents : 1D array, shape (n_nodes, ) or None
The parent of each node. Only returned when a connectivity matrix
is specified, elsewhere 'None' is returned.
distances : 1D array, shape (n_nodes-1, )
Only returned if return_distance is set to True (for compatibility).
The distances between the centers of the nodes. `distances[i]`
corresponds to a weighted euclidean distance between
        the nodes `children[i, 0]` and `children[i, 1]`. If the nodes refer to
leaves of the tree, then `distances[i]` is their unweighted euclidean
distance. Distances are updated in the following way
(from scipy.hierarchy.linkage):
The new entry :math:`d(u,v)` is computed as follows,
.. math::
d(u,v) = \\sqrt{\\frac{|v|+|s|}
{T}d(v,s)^2
+ \\frac{|v|+|t|}
{T}d(v,t)^2
- \\frac{|v|}
{T}d(s,t)^2}
where :math:`u` is the newly joined cluster consisting of
clusters :math:`s` and :math:`t`, :math:`v` is an unused
cluster in the forest, :math:`T=|v|+|s|+|t|`, and
:math:`|*|` is the cardinality of its argument. This is also
known as the incremental algorithm.
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (-1, 1))
n_samples, n_features = X.shape
if connectivity is None:
from scipy.cluster import hierarchy # imports PIL
if n_clusters is not None:
warnings.warn('Partial build of the tree is implemented '
'only for structured clustering (i.e. with '
'explicit connectivity). The algorithm '
'will build the full tree and only '
'retain the lower branches required '
'for the specified number of clusters',
stacklevel=2)
out = hierarchy.ward(X)
children_ = out[:, :2].astype(np.intp)
if return_distance:
distances = out[:, 2]
return children_, 1, n_samples, None, distances
else:
return children_, 1, n_samples, None
connectivity, n_components = _fix_connectivity(X, connectivity)
if n_clusters is None:
n_nodes = 2 * n_samples - 1
else:
if n_clusters > n_samples:
raise ValueError('Cannot provide more clusters than samples. '
                '%i clusters were requested, but there are only %i samples.'
% (n_clusters, n_samples))
n_nodes = 2 * n_samples - n_clusters
# create inertia matrix
coord_row = []
coord_col = []
A = []
for ind, row in enumerate(connectivity.rows):
A.append(row)
# We keep only the upper triangular for the moments
# Generator expressions are faster than arrays on the following
row = [i for i in row if i < ind]
coord_row.extend(len(row) * [ind, ])
coord_col.extend(row)
coord_row = np.array(coord_row, dtype=np.intp, order='C')
coord_col = np.array(coord_col, dtype=np.intp, order='C')
# build moments as a list
moments_1 = np.zeros(n_nodes, order='C')
moments_1[:n_samples] = 1
moments_2 = np.zeros((n_nodes, n_features), order='C')
moments_2[:n_samples] = X
inertia = np.empty(len(coord_row), dtype=np.float64, order='C')
_hierarchical.compute_ward_dist(moments_1, moments_2, coord_row, coord_col,
inertia)
inertia = list(six.moves.zip(inertia, coord_row, coord_col))
heapify(inertia)
# prepare the main fields
parent = np.arange(n_nodes, dtype=np.intp)
used_node = np.ones(n_nodes, dtype=bool)
children = []
if return_distance:
distances = np.empty(n_nodes - n_samples)
not_visited = np.empty(n_nodes, dtype=np.int8, order='C')
# recursive merge loop
for k in range(n_samples, n_nodes):
# identify the merge
while True:
inert, i, j = heappop(inertia)
if used_node[i] and used_node[j]:
break
parent[i], parent[j] = k, k
children.append((i, j))
used_node[i] = used_node[j] = False
if return_distance: # store inertia value
distances[k - n_samples] = inert
# update the moments
moments_1[k] = moments_1[i] + moments_1[j]
moments_2[k] = moments_2[i] + moments_2[j]
# update the structure matrix A and the inertia matrix
coord_col = []
not_visited.fill(1)
not_visited[k] = 0
_hierarchical._get_parents(A[i], coord_col, parent, not_visited)
_hierarchical._get_parents(A[j], coord_col, parent, not_visited)
# List comprehension is faster than a for loop
[A[l].append(k) for l in coord_col]
A.append(coord_col)
coord_col = np.array(coord_col, dtype=np.intp, order='C')
coord_row = np.empty(coord_col.shape, dtype=np.intp, order='C')
coord_row.fill(k)
n_additions = len(coord_row)
ini = np.empty(n_additions, dtype=np.float64, order='C')
_hierarchical.compute_ward_dist(moments_1, moments_2,
coord_row, coord_col, ini)
# List comprehension is faster than a for loop
[heappush(inertia, (ini[idx], k, coord_col[idx]))
for idx in range(n_additions)]
# Separate leaves in children (empty lists up to now)
n_leaves = n_samples
# sort children to get consistent output with unstructured version
children = [c[::-1] for c in children]
children = np.array(children) # return numpy array for efficient caching
if return_distance:
# 2 is scaling factor to compare w/ unstructured version
distances = np.sqrt(2. * distances)
return children, n_components, n_leaves, parent, distances
else:
return children, n_components, n_leaves, parent
# average and complete linkage
def linkage_tree(X, connectivity=None, n_components=None,
n_clusters=None, linkage='complete', affinity="euclidean",
return_distance=False):
"""Linkage agglomerative clustering based on a Feature matrix.
The inertia matrix uses a Heapq-based representation.
This is the structured version, that takes into account some topological
structure between samples.
Read more in the :ref:`User Guide <hierarchical_clustering>`.
Parameters
----------
X : array, shape (n_samples, n_features)
feature matrix representing n_samples samples to be clustered
connectivity : sparse matrix (optional).
connectivity matrix. Defines for each sample the neighboring samples
following a given structure of the data. The matrix is assumed to
be symmetric and only the upper triangular half is used.
Default is None, i.e, the Ward algorithm is unstructured.
n_clusters : int (optional)
Stop early the construction of the tree at n_clusters. This is
useful to decrease computation time if the number of clusters is
not small compared to the number of samples. In this case, the
complete tree is not computed, thus the 'children' output is of
limited use, and the 'parents' output should rather be used.
This option is valid only when specifying a connectivity matrix.
linkage : {"average", "complete"}, optional, default: "complete"
Which linkage criteria to use. The linkage criterion determines which
distance to use between sets of observation.
- average uses the average of the distances of each observation of
the two sets
- complete or maximum linkage uses the maximum distances between
all observations of the two sets.
affinity : string or callable, optional, default: "euclidean".
which metric to use. Can be "euclidean", "manhattan", or any
        distance known to paired distances (see metrics.pairwise)
return_distance : bool, default False
whether or not to return the distances between the clusters.
Returns
-------
children : 2D array, shape (n_nodes-1, 2)
The children of each non-leaf node. Values less than `n_samples`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_samples` is a non-leaf
node and has children `children_[i - n_samples]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_samples + i`
n_components : int
The number of connected components in the graph.
n_leaves : int
The number of leaves in the tree.
parents : 1D array, shape (n_nodes, ) or None
The parent of each node. Only returned when a connectivity matrix
is specified, elsewhere 'None' is returned.
distances : ndarray, shape (n_nodes-1,)
Returned when return_distance is set to True.
distances[i] refers to the distance between children[i][0] and
children[i][1] when they are merged.
See also
--------
ward_tree : hierarchical clustering with ward linkage
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (-1, 1))
n_samples, n_features = X.shape
linkage_choices = {'complete': _hierarchical.max_merge,
'average': _hierarchical.average_merge}
try:
join_func = linkage_choices[linkage]
except KeyError:
raise ValueError(
'Unknown linkage option, linkage should be one '
'of %s, but %s was given' % (linkage_choices.keys(), linkage))
if connectivity is None:
from scipy.cluster import hierarchy # imports PIL
if n_clusters is not None:
warnings.warn('Partial build of the tree is implemented '
'only for structured clustering (i.e. with '
'explicit connectivity). The algorithm '
'will build the full tree and only '
'retain the lower branches required '
'for the specified number of clusters',
stacklevel=2)
if affinity == 'precomputed':
# for the linkage function of hierarchy to work on precomputed
# data, provide as first argument an ndarray of the shape returned
# by pdist: it is a flat array containing the upper triangular of
# the distance matrix.
i, j = np.triu_indices(X.shape[0], k=1)
X = X[i, j]
elif affinity == 'l2':
# Translate to something understood by scipy
affinity = 'euclidean'
elif affinity in ('l1', 'manhattan'):
affinity = 'cityblock'
elif callable(affinity):
X = affinity(X)
i, j = np.triu_indices(X.shape[0], k=1)
X = X[i, j]
out = hierarchy.linkage(X, method=linkage, metric=affinity)
children_ = out[:, :2].astype(np.int)
if return_distance:
distances = out[:, 2]
return children_, 1, n_samples, None, distances
return children_, 1, n_samples, None
connectivity, n_components = _fix_connectivity(X, connectivity)
connectivity = connectivity.tocoo()
# Put the diagonal to zero
diag_mask = (connectivity.row != connectivity.col)
connectivity.row = connectivity.row[diag_mask]
connectivity.col = connectivity.col[diag_mask]
connectivity.data = connectivity.data[diag_mask]
del diag_mask
if affinity == 'precomputed':
distances = X[connectivity.row, connectivity.col]
else:
# FIXME We compute all the distances, while we could have only computed
# the "interesting" distances
distances = paired_distances(X[connectivity.row],
X[connectivity.col],
metric=affinity)
connectivity.data = distances
if n_clusters is None:
n_nodes = 2 * n_samples - 1
else:
assert n_clusters <= n_samples
n_nodes = 2 * n_samples - n_clusters
if return_distance:
distances = np.empty(n_nodes - n_samples)
# create inertia heap and connection matrix
A = np.empty(n_nodes, dtype=object)
inertia = list()
    # LIL seems to be the best format to access the rows quickly,
# without the numpy overhead of slicing CSR indices and data.
connectivity = connectivity.tolil()
# We are storing the graph in a list of IntFloatDict
for ind, (data, row) in enumerate(zip(connectivity.data,
connectivity.rows)):
A[ind] = IntFloatDict(np.asarray(row, dtype=np.intp),
np.asarray(data, dtype=np.float64))
# We keep only the upper triangular for the heap
# Generator expressions are faster than arrays on the following
inertia.extend(_hierarchical.WeightedEdge(d, ind, r)
for r, d in zip(row, data) if r < ind)
del connectivity
heapify(inertia)
# prepare the main fields
parent = np.arange(n_nodes, dtype=np.intp)
used_node = np.ones(n_nodes, dtype=np.intp)
children = []
# recursive merge loop
for k in xrange(n_samples, n_nodes):
# identify the merge
while True:
edge = heappop(inertia)
if used_node[edge.a] and used_node[edge.b]:
break
i = edge.a
j = edge.b
if return_distance:
# store distances
distances[k - n_samples] = edge.weight
parent[i] = parent[j] = k
children.append((i, j))
# Keep track of the number of elements per cluster
n_i = used_node[i]
n_j = used_node[j]
used_node[k] = n_i + n_j
used_node[i] = used_node[j] = False
# update the structure matrix A and the inertia matrix
# a clever 'min', or 'max' operation between A[i] and A[j]
coord_col = join_func(A[i], A[j], used_node, n_i, n_j)
for l, d in coord_col:
A[l].append(k, d)
# Here we use the information from coord_col (containing the
# distances) to update the heap
heappush(inertia, _hierarchical.WeightedEdge(d, k, l))
A[k] = coord_col
# Clear A[i] and A[j] to save memory
A[i] = A[j] = 0
# Separate leaves in children (empty lists up to now)
n_leaves = n_samples
    # return numpy array for efficient caching
children = np.array(children)[:, ::-1]
if return_distance:
return children, n_components, n_leaves, parent, distances
return children, n_components, n_leaves, parent
# Matching names to tree-building strategies
def _complete_linkage(*args, **kwargs):
kwargs['linkage'] = 'complete'
return linkage_tree(*args, **kwargs)
def _average_linkage(*args, **kwargs):
kwargs['linkage'] = 'average'
return linkage_tree(*args, **kwargs)
_TREE_BUILDERS = dict(
ward=ward_tree,
complete=_complete_linkage,
average=_average_linkage)
###############################################################################
# Functions for cutting hierarchical clustering tree
def _hc_cut(n_clusters, children, n_leaves):
"""Function cutting the ward tree for a given number of clusters.
Parameters
----------
n_clusters : int or ndarray
The number of clusters to form.
children : 2D array, shape (n_nodes-1, 2)
The children of each non-leaf node. Values less than `n_samples`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_samples` is a non-leaf
node and has children `children_[i - n_samples]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_samples + i`
n_leaves : int
Number of leaves of the tree.
Returns
-------
labels : array [n_samples]
cluster labels for each point
"""
if n_clusters > n_leaves:
raise ValueError('Cannot extract more clusters than samples: '
                         '%s clusters were given for a tree with %s leaves.'
% (n_clusters, n_leaves))
# In this function, we store nodes as a heap to avoid recomputing
# the max of the nodes: the first element is always the smallest
# We use negated indices as heaps work on smallest elements, and we
# are interested in largest elements
# children[-1] is the root of the tree
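    # e.g. with n_leaves=4 the tree has 7 nodes (indices 0..6); the root is
    # node 6 and `nodes` therefore starts as [-6] (illustrative values only)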
nodes = [-(max(children[-1]) + 1)]
for i in xrange(n_clusters - 1):
# As we have a heap, nodes[0] is the smallest element
these_children = children[-nodes[0] - n_leaves]
# Insert the 2 children and remove the largest node
heappush(nodes, -these_children[0])
heappushpop(nodes, -these_children[1])
label = np.zeros(n_leaves, dtype=np.intp)
for i, node in enumerate(nodes):
label[_hierarchical._hc_get_descendent(-node, children, n_leaves)] = i
return label
###############################################################################
class AgglomerativeClustering(BaseEstimator, ClusterMixin):
"""
Agglomerative Clustering
Recursively merges the pair of clusters that minimally increases
a given linkage distance.
Read more in the :ref:`User Guide <hierarchical_clustering>`.
Parameters
----------
n_clusters : int, default=2
The number of clusters to find.
connectivity : array-like or callable, optional
Connectivity matrix. Defines for each sample the neighboring
samples following a given structure of the data.
This can be a connectivity matrix itself or a callable that transforms
the data into a connectivity matrix, such as derived from
kneighbors_graph. Default is None, i.e, the
hierarchical clustering algorithm is unstructured.
affinity : string or callable, default: "euclidean"
Metric used to compute the linkage. Can be "euclidean", "l1", "l2",
"manhattan", "cosine", or 'precomputed'.
If linkage is "ward", only "euclidean" is accepted.
memory : Instance of sklearn.externals.joblib.Memory or string, optional \
(default=None)
Used to cache the output of the computation of the tree.
By default, no caching is done. If a string is given, it is the
path to the caching directory.
compute_full_tree : bool or 'auto' (optional)
Stop early the construction of the tree at n_clusters. This is
useful to decrease computation time if the number of clusters is
not small compared to the number of samples. This option is
useful only when specifying a connectivity matrix. Note also that
when varying the number of clusters and using caching, it may
be advantageous to compute the full tree.
linkage : {"ward", "complete", "average"}, optional, default: "ward"
Which linkage criterion to use. The linkage criterion determines which
distance to use between sets of observation. The algorithm will merge
the pairs of cluster that minimize this criterion.
- ward minimizes the variance of the clusters being merged.
- average uses the average of the distances of each observation of
the two sets.
- complete or maximum linkage uses the maximum distances between
all observations of the two sets.
pooling_func : callable, default=np.mean
This combines the values of agglomerated features into a single
value, and should accept an array of shape [M, N] and the keyword
argument ``axis=1``, and reduce it to an array of size [M].
Attributes
----------
labels_ : array [n_samples]
cluster labels for each point
n_leaves_ : int
Number of leaves in the hierarchical tree.
n_components_ : int
The estimated number of connected components in the graph.
children_ : array-like, shape (n_nodes-1, 2)
The children of each non-leaf node. Values less than `n_samples`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_samples` is a non-leaf
node and has children `children_[i - n_samples]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_samples + i`
"""
def __init__(self, n_clusters=2, affinity="euclidean",
memory=None,
connectivity=None, compute_full_tree='auto',
linkage='ward', pooling_func=np.mean):
self.n_clusters = n_clusters
self.memory = memory
self.connectivity = connectivity
self.compute_full_tree = compute_full_tree
self.linkage = linkage
self.affinity = affinity
self.pooling_func = pooling_func
def fit(self, X, y=None):
"""Fit the hierarchical clustering on the data
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The samples a.k.a. observations.
Returns
-------
self
"""
X = check_array(X, ensure_min_samples=2, estimator=self)
memory = self.memory
if memory is None:
memory = Memory(cachedir=None, verbose=0)
elif isinstance(memory, six.string_types):
memory = Memory(cachedir=memory, verbose=0)
elif not isinstance(memory, Memory):
raise ValueError("'memory' should either be a string or"
" a sklearn.externals.joblib.Memory"
" instance, got 'memory={!r}' instead.".format(
type(memory)))
if self.n_clusters <= 0:
raise ValueError("n_clusters should be an integer greater than 0."
" %s was provided." % str(self.n_clusters))
if self.linkage == "ward" and self.affinity != "euclidean":
raise ValueError("%s was provided as affinity. Ward can only "
"work with euclidean distances." %
(self.affinity, ))
if self.linkage not in _TREE_BUILDERS:
raise ValueError("Unknown linkage type %s."
"Valid options are %s" % (self.linkage,
_TREE_BUILDERS.keys()))
tree_builder = _TREE_BUILDERS[self.linkage]
connectivity = self.connectivity
if self.connectivity is not None:
if callable(self.connectivity):
connectivity = self.connectivity(X)
connectivity = check_array(
connectivity, accept_sparse=['csr', 'coo', 'lil'])
n_samples = len(X)
compute_full_tree = self.compute_full_tree
if self.connectivity is None:
compute_full_tree = True
if compute_full_tree == 'auto':
# Early stopping is likely to give a speed up only for
# a large number of clusters. The actual threshold
# implemented here is heuristic
compute_full_tree = self.n_clusters < max(100, .02 * n_samples)
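            # e.g. with n_samples=200 the full tree is built whenever
            # n_clusters < max(100, 4) == 100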
n_clusters = self.n_clusters
if compute_full_tree:
n_clusters = None
# Construct the tree
kwargs = {}
if self.linkage != 'ward':
kwargs['linkage'] = self.linkage
kwargs['affinity'] = self.affinity
self.children_, self.n_components_, self.n_leaves_, parents = \
memory.cache(tree_builder)(X, connectivity,
n_clusters=n_clusters,
**kwargs)
# Cut the tree
if compute_full_tree:
self.labels_ = _hc_cut(self.n_clusters, self.children_,
self.n_leaves_)
else:
labels = _hierarchical.hc_get_heads(parents, copy=False)
# copy to avoid holding a reference on the original array
labels = np.copy(labels[:n_samples])
# Reassign cluster numbers
self.labels_ = np.searchsorted(np.unique(labels), labels)
return self
class FeatureAgglomeration(AgglomerativeClustering, AgglomerationTransform):
"""Agglomerate features.
Similar to AgglomerativeClustering, but recursively merges features
instead of samples.
Read more in the :ref:`User Guide <hierarchical_clustering>`.
Parameters
----------
n_clusters : int, default 2
The number of clusters to find.
connectivity : array-like or callable, optional
Connectivity matrix. Defines for each feature the neighboring
features following a given structure of the data.
This can be a connectivity matrix itself or a callable that transforms
the data into a connectivity matrix, such as derived from
kneighbors_graph. Default is None, i.e, the
hierarchical clustering algorithm is unstructured.
affinity : string or callable, default "euclidean"
Metric used to compute the linkage. Can be "euclidean", "l1", "l2",
"manhattan", "cosine", or 'precomputed'.
If linkage is "ward", only "euclidean" is accepted.
memory : Instance of sklearn.externals.joblib.Memory or string, optional \
(default=None)
Used to cache the output of the computation of the tree.
By default, no caching is done. If a string is given, it is the
path to the caching directory.
compute_full_tree : bool or 'auto', optional, default "auto"
Stop early the construction of the tree at n_clusters. This is
useful to decrease computation time if the number of clusters is
not small compared to the number of features. This option is
useful only when specifying a connectivity matrix. Note also that
when varying the number of clusters and using caching, it may
be advantageous to compute the full tree.
linkage : {"ward", "complete", "average"}, optional, default "ward"
Which linkage criterion to use. The linkage criterion determines which
distance to use between sets of features. The algorithm will merge
the pairs of cluster that minimize this criterion.
- ward minimizes the variance of the clusters being merged.
- average uses the average of the distances of each feature of
the two sets.
- complete or maximum linkage uses the maximum distances between
all features of the two sets.
pooling_func : callable, default np.mean
This combines the values of agglomerated features into a single
value, and should accept an array of shape [M, N] and the keyword
argument `axis=1`, and reduce it to an array of size [M].
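        For example, ``np.max`` can be passed instead to keep the largest
        value within each group of merged features.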
Attributes
----------
labels_ : array-like, (n_features,)
cluster labels for each feature.
n_leaves_ : int
Number of leaves in the hierarchical tree.
n_components_ : int
The estimated number of connected components in the graph.
children_ : array-like, shape (n_nodes-1, 2)
The children of each non-leaf node. Values less than `n_features`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_features` is a non-leaf
node and has children `children_[i - n_features]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_features + i`
"""
def fit(self, X, y=None, **params):
"""Fit the hierarchical clustering on the data
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The data
Returns
-------
self
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
ensure_min_features=2, estimator=self)
return AgglomerativeClustering.fit(self, X.T, **params)
@property
def fit_predict(self):
raise AttributeError
| bsd-3-clause |
mwinton/pyml-book | logistic_regression_w_scikit_sepal_petal.py | 1 | 7374 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 19 21:24:25 2017
@author: mike
"""
from sklearn import datasets
from sklearn.model_selection import train_test_split as tts
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Perceptron
from sklearn.metrics import accuracy_score
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
import numpy as np
iris = datasets.load_iris()
X = iris.data[:, [0,2]] # sepal length, petal length
y = iris.target
# Subset data set to only classes 0,1
X_01 = X[(y==0) | (y==1)]
y_01 = y[(y==0) | (y==1)]
print('Class labels:', np.unique(y_01))
X_train, X_test, y_train, y_test = tts(X_01,y_01,test_size=0.3, random_state=1, stratify=y_01)
# Split original data into train/test sets with equal proportions of each class
#print('Class labels:', np.unique(y))
#X_train, X_test, y_train, y_test = tts(X,y,test_size=0.3, random_state=1, stratify=y)
print('Label counts in y:', np.bincount(y))
print('Label counts in y_train:', np.bincount(y_train))
print('Label counts in y_test:', np.bincount(y_test))
# Apply feature scaling to standardize the train/test sets. (Both use mu/sigma from training)
print('Applying standardization to data set')
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)
# Train logistic regression classifier
#lrgd = LogisticRegressionGD(n_iter=40, eta=0.1, random_state=1) # Python ML book
lrgd = LogisticRegressionGD(n_iter=100, alpha = 2.0, random_state=1) # Andrew Ng book
lrgd.fit(X_train_std, y_train)
# Make prediction for test set
y_pred = lrgd.predict(X_test_std)
print('Correctly classified samples: %d' % (y_test == y_pred).sum())
print('Misclassified samples: %d' % (y_test != y_pred).sum())
print('Accuracy: %.2f' % accuracy_score(y_test, y_pred))
#print('Accuracy: %.2f' % lrgd.score(X_test_std, y_test)) #equivalent to above
#Plot costs vs. # iterations to verify convergence
plt.plot(range(1, len(lrgd.cost_) + 1),lrgd.cost_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Gradient descent cost function')
plt.title('Convergence of Logistic Regression model')
plt.show()
# Plot data and decision regions
X_combined_std = np.vstack((X_train_std, X_test_std))
y_combined = np.hstack((y_train, y_test))
plot_decision_regions(X=X_combined_std,
y=y_combined,
classifier=lrgd,
test_idx=range(y_train.shape[0],y_train.shape[0]+y_test.shape[0]))
plt.title('Iris Classification by logistic regression')
plt.xlabel('sepal length (standardized)')
plt.ylabel('petal length (standardized)')
plt.legend()
plt.show()
class LogisticRegressionGD(object):
"""Logistic Regression classifier with gradient descent
Parameters
------------
    eta : float
        Learning rate (between 0.0 and 1.0); kept for the Python ML book
        variant of the update rule (currently commented out in `fit`).
    alpha : float
        Learning rate (between 0.0 and 1.0); used by the active Andrew Ng
        style update rule in `fit`.
n_iter : int
Passes over the training dataset.
random_state : int
Random number generator seed for random weight
initialization.
Attributes
-----------
w_ : 1d-array
Weights after fitting.
cost_ : list
        Logistic regression cost function value in each epoch.
"""
def __init__(self, alpha = 2.0, eta=0.05, n_iter=100, random_state=1):
self.eta = eta #Py ML book
self.alpha = alpha # Andrew Ng
self.n_iter = n_iter
self.random_state = random_state
def fit(self, X, y):
"""
Fits training data. Optimizes w_ with gradient descent.
Parameters
----------
X : {array-like}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
"""
# Set up initial values for weights
rgen = np.random.RandomState(self.random_state)
self.w_ = rgen.normal(loc=0.0, scale=0.01, size=1 + X.shape[1])
self.cost_ = []
m = len(y) # Andrew Ng definition
#Run gradient descent (NOTE: it keeps running through n_iter no matter what)
for i in range(self.n_iter):
net_input = self.net_input(X) #scalar
output = self.sigmoid(net_input) #vector
errors = (y - output) #vector
# self.w_[0] += self.eta * errors.sum() #vector; w_[0] is bias unit
# self.w_[1:] += self.eta * X.T.dot(errors) #vector
self.w_[0] += (self.alpha / m) * errors.sum() #vector; w_[0] is bias unit
self.w_[1:] += (self.alpha / m) * X.T.dot(errors) #vector
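            # this is the gradient descent step w := w + (alpha/m) * X.T.dot(y - output),
            # i.e. a step down the gradient of the logistic cost computed below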
cost = self.cost_function(y, output)
self.cost_.append(cost) #used to verify convergence
return self
def net_input(self, X):
"""Calculate net input. This term is z in Andrew Ng's class """
return np.dot(X, self.w_[1:]) + self.w_[0]
def sigmoid (self, z):
        '''Calculate sigmoid function from net_input (z)'''
        # z values are clipped to the range [-250, 250] to avoid overflow in np.exp
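        # e.g. sigmoid(0) == 0.5; large positive z approaches 1.0, large negative z approaches 0.0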
return 1.0 / (1.0 + np.exp(-np.clip(z,-250,250)))
def cost_function (self,y,output):
'''Calculate the logistic regression cost function'''
m = len(y)
cost = (1/m) * (-y.dot(np.log(output)) - ((1.-y).dot(np.log(1.-output))))
return cost
def predict(self, X):
"""Return class label based on sigmoid function"""
#Due to shape of sigmoid function, these two options are equivalent
#return np.where(self.net_input(X) >= 0, 1, 0)
return np.where(self.sigmoid(self.net_input(X)) >= 0.5, 1, 0)
def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):
'''
Helper function to plot the decision regions.
'''
# setup marker generator and color map
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
# plot the decision surface
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
np.arange(x2_min, x2_max, resolution))
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
# plot class samples
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y == cl, 0],
y=X[y == cl, 1],
alpha=0.8,
c=colors[idx],
marker=markers[idx],
label=cl,
edgecolor='black')
# highlight test samples
if test_idx:
# plot all samples
X_test, y_test = X[test_idx,:], y[test_idx]
plt.scatter(X_test[:,0], X_test[:,1],
alpha=1.0,
c='',
linewidth = 1,
marker='o',
s=100,
label='test set',
edgecolor='black')
| mit |
chjost/clebsch_gordan | group/group_cg.py | 1 | 13406 | """Class for the clebsch-gordan coefficients of a group."""
import numpy as np
import itertools as it
import pandas as pd
from pandas import Series, DataFrame
import group_class
import utils
from rotations import _all_rotations
class OhCG(object):
def __init__(self, p, p1, p2, groups=None):
"""p, p1, and p2 are the magnitudes of the momenta.
"""
self.prec = 1e-6
# save the norm of the momenta for the combined system
# and each particle
self.p = p
self.p1 = p1
self.p2 = p2
# lookup table for reference momenta
lpref = [np.asarray([0.,0.,0.]), np.asarray([0.,0.,1.]),
np.asarray([1.,1.,0.]), np.asarray([1.,1.,1.])]
# save reference momenta
self.pref = lpref[p]
self.pref1 = lpref[p1]
self.pref2 = lpref[p2]
# get the basic groups
if groups is None:
self.g0 = None
self.g = None
self.g1 = None
self.g2 = None
else:
self.g0 = groups[0]
self.g = groups[p]
self.g1 = groups[p1]
self.g2 = groups[p2]
# get the cosets, always in the maximal group (2O here)
# is set to None if at least one group is None
self.coset1 = self.gen_coset(self.g1)
self.coset2 = self.gen_coset(self.g2)
#print(self.coset1)
#print(self.coset2)
# generate the allowed momentum combinations and sort them into cosets
self.gen_momenta()
if groups is not None:
self.sort_momenta()
# calculate induced rep gamma
# here for p1 and p2 the A1(A2) irreps are hard-coded
# since only these contribute to pi-pi scattering
if groups is None:
self.gamma1 = None
self.gamma2 = None
else:
irstr = "A1" if p1 < 1e-6 else "A2"
self.gamma1 = self.gen_ind_reps(self.g, self.g1, irstr, self.coset1)
irstr = "A1" if p2 < 1e-6 else "A2"
self.gamma2 = self.gen_ind_reps(self.g, self.g2, irstr, self.coset2)
#print(self.gamma1[:5])
self.irreps = []
self.cgs = []
# choose mu1, mu2 and i1, i2, according to Dudek paper
# since A1 and A2 are 1D, set mu to 0
self.mu1 = 0
self.mu2 = 0
if self.p == 0:
            # in this case choose the highest option
self.i1 = -1
self.i2 = -1
self.i1i2 = [(self.pref, -1, -1)]
else:
self.i1i2 = []
for m in self.momenta:
for ((m1, i1), (m2, i2)) in it.product(self.smomenta1, self.smomenta2):
if utils._eq(m1+m2-m):
self.i1i2.append((m,i1,i2))
self.i1 = i1
self.i2 = i2
break
def gen_momenta(self):
pm = 4 # maximum component in each direction
def _abs(x):
return np.dot(x, x) <= pm
def _abs1(x,a):
return np.dot(x, x) == a
gen = it.ifilter(_abs, it.product(range(-pm,pm+1), repeat=3))
lp3 = [np.asarray(y, dtype=int) for y in gen]
self.momenta = [y for y in it.ifilter(lambda x: _abs1(x,self.p), lp3)]
self.momenta1 = [y for y in it.ifilter(lambda x: _abs1(x,self.p1), lp3)]
self.momenta2 = [y for y in it.ifilter(lambda x: _abs1(x,self.p2), lp3)]
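        # e.g. for a squared magnitude of 1 these lists contain exactly the six
        # unit vectors (+-1,0,0), (0,+-1,0) and (0,0,+-1)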
self.allmomenta = []
# only save allowed momenta combinations
for p in self.momenta:
for p1 in self.momenta1:
for p2 in self.momenta2:
if utils._eq(p1+p2-p):
self.allmomenta.append((p, p1, p2))
def gen_coset(self, g1):
"""Cosets contain the numbers of the rotation objects
"""
if self.g0 is None or g1 is None:
return None
n = int(self.g0.order/g1.order)
if n == 0:
raise RuntimeError("number of cosets is 0!")
coset = np.zeros((n, g1.order), dtype=int)
l = self.g0.order
l1 = g1.order
# set the subgroup
count = 0
for r in range(l1):
elem = g1.lrotations[r]
if elem in self.g0.lrotations:
coset[0, count] = elem
count += 1
# calc the cosets
uniq = np.unique(coset)
cnum = 1 # coset number
for elem in self.g0.lrotations:
if elem in uniq:
continue
count = 0
for elem1 in g1.lrotations:
if elem1 in self.g0.lrotations:
# multiplication table contains numbers
# [0, g.order), so lookup the element
look = self.g0.lrotations.index(elem1)
el = self.g0.tmult[look, elem]
coset[cnum, count] = self.g0.lrotations[el]
count += 1
cnum += 1
uniq = np.unique(coset)
if len(uniq) != self.g0.order:
print("some elements got lost!")
if cnum != n:
print("some coset not found!")
return coset
def gen_ind_reps(self, g, g1, irstr, coset):
ir = g1.instances[g1.lirreps.index(irstr)]
dim = ir.dim
ndim = (self.g0.order, coset.shape[0]*dim, coset.shape[0]*dim)
gamma = np.zeros(ndim, dtype=complex)
for ind, r in enumerate(self.g0.lrotations):
# take the first elements of the coset as representatives
for c1, rj in enumerate(coset[:,0]):
# translate to multiplication table
el1 = self.g0.lrotations.index(rj)
# get element
rrj = self.g0.tmult[ind,el1]
ind1 = slice(c1*dim, (c1+1)*dim)
for c2, ri in enumerate(coset[:,0]):
# translate to multiplication table and get inverse
el2 = self.g0.lrotations.index(ri)
riinv = self.g0.Inverse(el2)
# get element
riinvrrj = self.g0.tmult[riinv, rrj]
# translate to rotation element and check
# if in subgroup
el3 = self.g0.lrotations[riinvrrj]
if el3 not in coset[0]:
continue
# if in subgroup, look up position of element
elem = g1.lrotations.index(el3)
ind2 = slice(c2*dim,(c2+1)*dim)
# set induced representation
gamma[ind, ind1, ind2] = ir.mx[elem]
return gamma
def sort_momenta(self):
# check if cosets exists
if self.coset1 is None or self.coset2 is None:
self.smomenta1 = None
self.smomenta2 = None
return
        # search for the coset such that R * p_ref = p
res1 = []
res2 = []
for p, p1, p2 in self.allmomenta:
#print("momentum coset search")
done = False
# check if already in list
for r in res1:
if utils._eq(r[0],p1):
done=True
break
# if not get coset
if not done:
#print("%r not in list" % p1)
for i, c in enumerate(self.coset1):
t = self.check_coset(self.pref1, p1, c)
#print(t)
if np.all(t):
#print(" in coset %d" %i)
res1.append((p1, i))
break
#else:
# print(" not in coset %d" %i)
#else:
# print("%r already in list" % p1)
done = False
# check if already in list
for r in res2:
if utils._eq(r[0],p2):
done=True
break
if not done:
for i, c in enumerate(self.coset2):
t = self.check_coset(self.pref2, p2, c)
if np.all(t):
res2.append((p2, i))
break
self.smomenta1 = res1
if len(self.smomenta1) != len(self.momenta1):
print("some vectors not sorted")
self.smomenta2 = res2
if len(self.smomenta2) != len(self.momenta2):
print("some vectors not sorted")
def check_coset(self, pref, p, coset):
res = []
for elem in coset:
rot = _all_rotations[elem]
rvec = rot.rot_vector(pref)
c1 = utils._eq(rvec, p)
c2 = utils._eq(rvec, -p)
if c1 or c2:
res.append(True)
else:
res.append(False)
return res
def check_all_cosets(self, p, p1, p2):
j1, j2 = None, None
i1, i2 = None, None
for m, j in self.smomenta1:
if utils._eq(p1,m):
j1 = j
break
for m, j in self.smomenta2:
if utils._eq(p2,m):
j2 = j
break
for m, k1, k2 in self.i1i2:
if utils._eq(p, m):
i1, i2 = k1, k2
break
return j1, j2, i1, i2
def calc_pion_cg(self, p, p1, p2, irname):
"""Calculate the elements of the Clebsch-Gordan matrix.
Assumes that p=p1+p2, where all three are 3-vectors.
"""
# get irrep of group g
ir = self.g.instances[self.g.lirreps.index(irname)]
        # j1 and j2 are the cosets containing
        # the given momenta p1 and p2
j1, j2, i1, i2 = self.check_all_cosets(p, p1, p2)
#print(j1, j2, p1)
cg = np.zeros((ir.dim,ir.dim), dtype=complex)
for ind, r in enumerate(self.g.lrotations):
rep = ir.mx[ind]
# hard coded for pi-pi scattering
g1 = self.gamma1[r,j1, i1]
if utils._eq(g1):
continue
g2 = self.gamma2[r,j2, i2]
if utils._eq(g2):
continue
cg += rep.conj()*g1*g2
cg *= float(ir.dim)/self.g.order
return cg
def get_pion_cg(self, irname):
try:
ind = self.irreps.index(irname)
return irname, self.cgs[ind], self.allmomenta
except:
pass
result = []
# iterate over momenta
for p, p1, p2 in self.allmomenta:
res = self.calc_pion_cg(p, p1, p2, irname)
if res is None:
continue
result.append(res)
result = np.asarray(result)
# check if all coefficients are zero
if utils._eq(result):
cgs = None
else:
# orthonormalize the basis
cgs = self._norm_cgs(result)
self.irreps.append(irname)
self.cgs.append(cgs)
return irname, cgs, self.allmomenta
def _norm_cgs(self, data):
# prepare result array
res = np.zeros(data.shape[:-1], dtype=complex)
# sort by final momentum, so that all final momenta are
        # normalized separately
ind = [[] for x in self.momenta]
for i, m in enumerate(self.allmomenta):
for j, fm in enumerate(self.momenta):
if np.array_equal(m[0], fm):
ind[j].append(i)
break
# norm the data
# set starting variables
mup = 0
for i in range(data.shape[1]):
for j in ind:
tmp = data[j,i,mup]
if np.any(tmp):
norm = np.sqrt(np.vdot(tmp, tmp))
res[j,i] = tmp/norm
mup += 1
return res
def display(data, mom, empty=None):
def _d1(data):
tmp = ["%2d" % x for x in data]
tmp = ",".join(tmp)
tmp = "".join(("(", tmp, ")"))
return tmp
def _d2(data):
tmp = ["%+.3f%+.3fj" % (x.real, x.imag) for x in data]
tmp = ", ".join(tmp)
tmp = "".join(("[", tmp, "]"))
return tmp
count = 0
for d, m in zip(data, mom):
print("% 11s = %11s + %11s => %s" % (\
_d1(m[0]), _d1(m[1]), _d1(m[2]), _d2(d)))
if empty is not None:
count += 1
if count == empty:
count = 0
print("")
def cg_to_pandas(data, mom):
"""Converts the Clebsch-Gordan coefficients for a two-particle operator
into a pandas DataFrame according to the conventions used in subduction.py.
"""
nb_mom = len(mom)
nb_rows = len(data[0])
# convert momenta into tuples (p_so, p_si) and copy for every row
# fill \gamma_5 as gamma structure i. e. two pions hardcoded
# flatten data to one list with row running faster than momentum
df = DataFrame({'p' : [tuple([tuple(m[1]), tuple(m[2])]) \
for m in mom for _i in range(nb_rows)], \
'\gamma' : [(5,5)] * nb_mom*nb_rows, \
'cg-coefficients' :
[cg for cg_row in data for cg in cg_row]}, \
index = pd.Index( range(nb_rows) * nb_mom, name='\mu'))
print df
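# Minimal illustrative sketch of the input layout cg_to_pandas expects (not
# called anywhere): two momentum combinations and a 2x2 block of made-up
# Clebsch-Gordan coefficients. The numbers are placeholders, not physical
# values for any irrep.
def _example_cg_to_pandas():
    mom = [(np.asarray([0, 0, 0]), np.asarray([0, 0, 1]), np.asarray([0, 0, -1])),
           (np.asarray([0, 0, 0]), np.asarray([0, 0, -1]), np.asarray([0, 0, 1]))]
    data = np.asarray([[0.5+0.j, 0.5+0.j],
                       [0.5+0.j, -0.5+0.j]])
    cg_to_pandas(data, mom)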
if __name__ == "__main__":
print("for checks execute the test script")
| gpl-3.0 |
bikong2/scikit-learn | examples/calibration/plot_compare_calibration.py | 241 | 5008 | """
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subsetting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
return probabilities closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve than
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
###############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name, ))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
| bsd-3-clause |
sujithvm/internationality-journals | src/SNIPvsourSNIP.py | 3 | 3829 | __author__ = 'Sukrit'
import csv
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
f = open('../output/both_journal_list.txt', 'r') #reading list of journals present in Aminer
x = f.readlines()
xn = []
for line in x:
#line = line.replace('&','and') #converting & to 'and' [UGH]
xn.append(line.rstrip())
SNIP = pd.read_csv("../data/journal_SNIP_values.csv")
journals = SNIP['Source Title'] #taking only 'Source Title' column
jlist = []
i = 0
j = 0
for jname in journals :
for name in xn :
if jname == name :
jlist.append(jname)
i += 1
#print jlist
SNIP_2 = pd.DataFrame()
# now collect the SNIP values for the journals common to both lists.
for name in jlist :
SNIP_2 = SNIP_2.append(SNIP[ SNIP['Source Title'] == name ],ignore_index = True) #copy all SNIP/IPP values for which we have citations in Aminer
#print SNIP_2
SNIP_2 = SNIP_2.fillna(0)
SNIP_2010 = SNIP_2[['Source Title','2010 SNIP']].copy() #copying 'Source Title' and '2010 SNIP' columns to new df
#print SNIP_2010
our_SNIP2 = {'Applied Mathematics and Computation': 0 , 'Artificial Intelligence' :1.99047619048 , 'Artificial Intelligence in Medicine' : 0.707346904582 , 'Automatica' : 0.373679952833 , 'Computer Communications' :0.42907910953 , 'Computer Methods and Programs in Biomedicine' :0.379046231074 , 'Computer Networks' : 0.749387157754 , 'Computer Vision and Image Understanding' :1.67294857909 , 'Computers and Education' : 0.704265888257, 'Computers and Geosciences' : 0.188574973214, 'Computers and Graphics' :0.442495274102 , 'Computers and Mathematics with Applications' : 0.329331757583 , 'Computers and Security' : 0 , 'Computers in Human Behavior' :0.650123718628 , 'Discrete Mathematics' : 0 , 'Environmental Modelling and Software' :0.367386629266 ,'Expert Systems with Applications' : 0.588827994966,'Future Generation Computer Systems' :1.07051816557 ,'Games and Economic Behavior' :0.00331943617232,'Information and Management' : 1.03949275362,'Information Processing Letters' : 0 , 'Information Sciences' : 0.453232588419, 'Journal of Approximation Theory' : 0.162687560813, 'Mathematics and Computers in Simulation' :0.173084886128 , 'Neural Networks' :0.678374260303 , 'Neurocomputing' : 0.634644582471, 'Parallel Computing' : 0.712270531401, 'Pattern Recognition' : 1.17715942029, 'Performance Evaluation' :0.713109730849 , 'Robotics and Autonomous Systems' : 0.739277818718, 'Science of Computer Programming' : 0 , 'Signal Processing' :0.554988312295 , 'Speech Communication' :0.540711462451 , 'Systems and Control Letters' : 0 }
our_SNIP = pd.read_csv('../output/SNIP_all_journals.csv',usecols=[1,3])
#print our_SNIP
jname = our_SNIP['Jname']
SNIP_full = pd.DataFrame();
for name in jname :
    SNIP_full = SNIP_full.append(SNIP_2010[ SNIP_2010['Source Title'] == name ],ignore_index = True) #accumulate the 2010 SNIP rows for the journals in our list
#print len(SNIP_full.index)
xarray = []
yarr = []
yarray = []
i = 0
print SNIP_full
print our_SNIP
#print jlist
#jlist.remove("Games and Economic Behavior")
for name in jname :
if ( our_SNIP['SNIP'][ our_SNIP['Jname']== name ].values != 0 and SNIP_full['2010 SNIP'][ SNIP_full['Source Title'] == name ].values != 0 ) :
xarray.append(our_SNIP['SNIP'][our_SNIP['Jname']== name ].values)
yarr.append(SNIP_full['2010 SNIP'][SNIP_full['Source Title'] == name ].values)
#plt.plot(xarray,yarray,'ro')
#plt.plot(xarray, np.poly1d(np.polyfit(xarray, yarray, 1))(xarray))
#plt.show()
yarr = [float(i) for i in yarr]
for item in yarr :
yarray.append(item)
print xarray
print yarray
print "\n\n"
data = [xarray,yarray]
with open('../data/SNIP_ourSNIP3.csv','wb') as f:
out = csv.writer(f, delimiter=',',quoting=csv.QUOTE_ALL)
out.writerows(zip(*data))
| mit |
ornlneutronimaging/ResoFit | ResoFit/model.py | 1 | 3451 | import numpy as np
import matplotlib.pyplot as plt
from lmfit.lineshapes import pvoigt
from lmfit import Model
def ikeda_carpenter(t, alpha, beta, fraction, t0, norm_factor=1):
_t = t - t0
# _t = t1[np.logical_not(t1 < 0)]
_t[_t < 0] = 0 # t>=0 required
# α=vΣ
# Σ is the macroscopic neutron scattering cross-section of the moderator
# (Σ=1.6 cm-1 for polyethylene in the high energy limit) and
# v is the neutron speed
part1 = 0.5 * alpha * (alpha*_t)**2 * np.exp(-alpha*_t)
part2_1 = alpha**3 * beta / (alpha - beta)**3 # jparc uses alpha**2 instead of alpha**3 in the original function
part2_2 = np.exp(-beta*_t) - np.exp(-alpha*_t) * (1 + (alpha - beta) * _t + 0.5 * (alpha - beta)**2 * _t**2)
part2 = part2_1 * part2_2
f = ((1 - fraction) * part1 + fraction * part2) * norm_factor
return f
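# Minimal usage sketch (illustrative only): evaluate the Ikeda-Carpenter pulse
# shape on a time grid. The parameter values below are arbitrary placeholders
# chosen to give a visible rise and decay, not fitted moderator constants.
def _example_ikeda_carpenter():
    t = np.linspace(0., 10., 500)
    shape = ikeda_carpenter(t, alpha=3.0, beta=0.4, fraction=0.3, t0=1.0)
    return t, shape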
def ikeda_carpenter_jparc(t, alpha, beta, fraction, t0, norm_factor=1):
_t = t - t0
# _t = t1[np.logical_not(t1 < 0)]
_t[_t < 0] = 0 # t>=0 required
part1 = 0.5 * alpha * (alpha*_t)**2 * np.exp(-alpha*_t)
part2_1 = alpha**2 * beta / (alpha - beta)**3 # jparc uses alpha**2 instead of alpha**3 in the original function
part2_2 = np.exp(-beta*_t) - np.exp(-alpha*_t) * (1 + (alpha - beta) * _t + 0.5 * (alpha - beta)**2 * _t**2)
part2 = part2_1 * part2_2
f = ((1 - fraction) * part1 + fraction * part2) * norm_factor
return f
def cole_windsor(t, sig1, sig2, gamma, fraction, t0, norm_factor=1):
_t = t - t0
f = []
for each_t in _t:
# for F1
if each_t <= 0:
f1 = np.exp(-0.5 * (each_t / sig1) ** 2)
if 0 < each_t <= gamma * sig2 ** 2:
f1 = np.exp(-0.5 * (each_t / sig2) ** 2)
if gamma * sig2 ** 2 < each_t:
f1 = np.exp(0.5 * (gamma * sig2) ** 2 - gamma * each_t)
# for F2
if each_t <= 0:
f2 = np.exp(-0.5 * (each_t / sig1) ** 2)
if 0 < each_t <= gamma * sig2 ** 2:
f2 = np.exp(0.5 * (each_t / sig2) ** 2)
if gamma * sig2 ** 2 < each_t:
f2 = np.exp(0.5 * (gamma * sig2) ** 2 - gamma * each_t)
each_f = norm_factor * ((1 - fraction) * f1 + fraction * f2)
f.append(each_f)
f = np.array(f)
return f
def pseudo_voigt(t, beta, sigma, fraction):
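    # note: 1/(1 + (t/beta)**2) has a Lorentzian line shape and
    # exp(-(t/sigma)**2) a Gaussian one, so the variable names below are
    # swapped relative to the profiles they actually hold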
gauss = 1 / (1 + (t / beta)**2)
lorentz = np.exp(-(t / sigma)**2)
f = (1 - fraction) * gauss + fraction * lorentz
return f
def cole_windsor_jparc(t, sig1, sig2, gam1, gam2, fraction, t0, norm_factor=1):
_t = t - t0
f = []
for each_t in _t:
# for F1
if each_t <= 0:
f1 = np.exp(-0.5 * (each_t / sig1) ** 2)
if 0 < each_t <= gam1 * sig2 ** 2:
f1 = np.exp(-0.5 * (each_t / sig2) ** 2)
if gam1 * sig2 ** 2 < each_t:
f1 = np.exp(0.5 * (gam1 * sig2) ** 2 - gam1 * each_t)
# for F2
if each_t <= 0:
f2 = np.exp(-0.5 * (each_t / sig1) ** 2)
if 0 < each_t <= gam2 * sig2 ** 2:
f2 = np.exp(0.5 * (each_t / sig2) ** 2)
if gam2 * sig2 ** 2 < each_t:
f2 = np.exp(0.5 * (gam2 * sig2) ** 2 - gam2 * each_t)
each_f = norm_factor * ((1 - fraction) * f1 + fraction * f2)
f.append(each_f)
f = np.array(f)
return f
def loglog_linear(x, slope, intercept):
_x_log = np.log10(x)
_y_log = slope * _x_log + intercept
y = np.power(10, _y_log)
return y
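# Quick sketch (illustrative only): loglog_linear is a power law in linear
# space, y = 10**intercept * x**slope, so slope=2 and intercept=0 return x**2.
def _example_loglog_linear():
    x = np.array([1., 2., 4., 8.])
    return loglog_linear(x, slope=2.0, intercept=0.0)  # -> [1., 4., 16., 64.]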
| bsd-3-clause |
Windy-Ground/scikit-learn | examples/linear_model/plot_iris_logistic.py | 283 | 1678 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on the
`iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# Create an instance of the Logistic Regression classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
pchrista/AliPhysics | PWGHF/vertexingHF/macros/AnalyseAODMismatchTree.py | 15 | 5530 | import uproot
import numpy as np
import pandas as pd
from ROOT import TH1F, TH2F, TCanvas, TLegend
from ROOT import kRed, kAzure, gStyle, kIsland
def GetMaskOfBits(bits):
'''
Helper method to get bit mask from bits
Arguments
----------
- list of bits
Returns
----------
- mask corresponding to the input bits
'''
mask = 0
for bit in bits:
mask += 2**bit
return mask
def FilterBitDf(dfToFilter, column, bitsToTest, logic='or'):
'''
Method to apply selection testing one or more bits
Arguments
----------
- pandas dataframe to filter
- colum with bitmap
- list of bits to test
- logic to combine the bits (and, or)
Returns
----------
- filtered pandas dataframe
'''
maskOfBits = GetMaskOfBits(bitsToTest)
flags = dfToFilter[column].astype(int) & maskOfBits
if logic == 'or':
flags = flags.astype('bool')
elif logic == 'and':
flags -= maskOfBits
flags = ~flags.astype('bool')
elif logic == 'not':
flags = ~flags.astype('bool')
else:
print('Error: only and, or, and not logics are supported for bitwise operations')
return None
dfFilt = dfToFilter[flags.values]
return dfFilt
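# Small illustrative example (hypothetical values, not called anywhere):
# mismatch_status is treated as a bitmap, so GetMaskOfBits([0, 2]) == 5 and
# FilterBitDf with logic='and' keeps only rows with both bit 0 and bit 2 set.
def _example_filter_bits():
    dfExample = pd.DataFrame({'mismatch_status': [0, 1, 4, 5, 7]})
    onlyBit0 = FilterBitDf(dfExample, 'mismatch_status', [0])             # values 1, 5, 7
    bits0And2 = FilterBitDf(dfExample, 'mismatch_status', [0, 2], 'and')  # values 5, 7
    return onlyBit0, bits0And2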
def main():
'''
Main function
'''
prod = 'LHC20g11a'
tree = uproot.open('AnalysisResults.root')['AOD_dAOD_Matching/fTreeMismatch']
df = tree.pandas.df()
df = df.sort_values(by=['file_name'])
pd.set_option('display.max_colwidth', None)
nFiles = len(df)
nEvents = sum(df['n_events'].values)
dfSel = {'good_files': df.query('mismatch_status == 0'),
'mism_ev': FilterBitDf(df, 'mismatch_status', [0]),
'mism_TProcessID': FilterBitDf(df, 'mismatch_status', [1]),
'mism_cand': FilterBitDf(df, 'mismatch_status', [2]),
'mism_ev_and_TProcessID': FilterBitDf(df, 'mismatch_status', [0, 1], logic='and'),
'mism_ev_and_cand': FilterBitDf(df, 'mismatch_status', [0, 2], logic='and'),
'mism_cand_and_TProcessID': FilterBitDf(df, 'mismatch_status', [1, 2], logic='and'),
'mism_all': FilterBitDf(df, 'mismatch_status', [1, 2, 3], logic='and')}
fracFiles, fracEv = {}, {}
for mism in dfSel:
fracFiles[mism] = len(dfSel[mism]) / nFiles
fracEv[mism] = sum(dfSel[mism]['n_events'].values) / nEvents
print(f'\nfraction of files with flag \"{mism}\": {fracFiles[mism]}')
print(f'fraction of events with flag \"{mism}\": {fracEv[mism]}')
gStyle.SetTitleSize(0.045, 'xy')
gStyle.SetLabelSize(0.04, 'xy')
gStyle.SetPadTopMargin(0.035)
gStyle.SetPadRightMargin(0.035)
gStyle.SetPadBottomMargin(0.15)
gStyle.SetPadLeftMargin(0.12)
gStyle.SetPadTickX(1)
gStyle.SetPadTickY(1)
gStyle.SetOptStat(0)
gStyle.SetPalette(kIsland)
hAODMism = TH1F('hAODMism', ';;fraction', 8, 0.5, 8.5)
hAODMism.SetLineWidth(2)
hAODMism.SetLineColor(kRed+1)
hAODMism.GetYaxis().SetRangeUser(1.e-5, 1.)
hEventMism = TH1F('hEventMism', ';;fraction', 8, 0.5, 8.5)
hEventMism.SetLineWidth(2)
hEventMism.SetLineColor(kAzure+4)
hEventMism.GetYaxis().SetRangeUser(1.e-5, 1.)
for iMism, mism in enumerate(dfSel):
hAODMism.GetXaxis().SetBinLabel(iMism+1, mism)
hEventMism.GetXaxis().SetBinLabel(iMism+1, mism)
hAODMism.SetBinContent(iMism+1, fracFiles[mism])
hEventMism.SetBinContent(iMism+1, fracEv[mism])
leg = TLegend(0.6, 0.7, 0.8, 0.9)
leg.SetBorderSize(0)
leg.SetFillStyle(0)
leg.AddEntry(hAODMism, 'AOD files', 'l')
leg.AddEntry(hEventMism, 'events', 'l')
cMismFrac = TCanvas('cMismFrac', '', 1920, 1080)
cMismFrac.SetLogy()
hAODMism.Draw()
hEventMism.Draw('same')
leg.Draw()
cMismFrac.Modified()
cMismFrac.Update()
dfSel['mism_cand'][['file_name']].to_csv(f'AOD_mismatch_{prod}_cand.txt', header=False, index=False)
dfSel['mism_ev'][['file_name']].to_csv(f'AOD_mismatch_{prod}_nevents.txt', header=False, index=False)
dfSel['mism_TProcessID'][['file_name']].to_csv(f'AOD_mismatch_{prod}_TProcessID.txt', header=False, index=False)
cMismFrac.SaveAs(f'AODMismatch_fractions_{prod}.pdf')
# check for files not tested (jobs failed)
runs = np.unique(df['run_number'].values)
nRuns = len(runs)
for iRun, run in enumerate(runs):
dfRunSel = df.query(f'run_number == {run}')
lastProcessedFile = list(dfRunSel['file_name'].values)[-1]
numLastProcFile = int(lastProcessedFile.decode().rpartition('AOD/')[2].rpartition('/')[0])
hFilesTested = TH2F(f'hFilesTested{run}', f'run {run};AOD number;', numLastProcFile, 0.5, numLastProcFile+0.5, 1, 0., 1.)
hFilesTested.GetZaxis().SetRangeUser(-0.001, 1.)
cFilesTested = TCanvas(f'cFilesTested{run}', '', 1920, 1080)
cFilesTested.SetTopMargin(0.12)
cFilesTested.SetRightMargin(0.12)
for fileName in dfRunSel['file_name']:
numProcFile = int(fileName.decode().rpartition('AOD/')[2].rpartition('/')[0])
hFilesTested.Fill(numProcFile, 0.5)
hFilesTested.Draw('colz')
cFilesTested.Modified()
cFilesTested.Update()
if iRun == 0:
cFilesTested.SaveAs(f'FilesTested_{prod}.pdf[')
cFilesTested.SaveAs(f'FilesTested_{prod}.pdf')
if iRun == nRuns-1:
cFilesTested.SaveAs(f'FilesTested_{prod}.pdf]')
input()
# call main function
main() | bsd-3-clause |
arlewis/galaxy_cutouts | extract_stamp.py | 1 | 35179 | import numpy as np
import astropy.io.fits
import astropy.wcs
import montage_wrapper as montage
from matplotlib.path import Path
import os
import sys
import shutil
import glob
import time
import scipy.ndimage as sp
from pdb import set_trace
# directories that contain the input data
_TOP_DIR = '/data/tycho/0/leroy.42/allsky/'
_INDEX_DIR = os.path.join(_TOP_DIR, 'z0mgs/')
_WISE_DIR = os.path.join(_TOP_DIR, 'unwise', 'atlas')
# directories to do the work in
_WORK_DIR = '/data/tycho/0/leroy.42/allsky/galex/atlas'
#_WORK_DIR = '/data/tycho/0/lewis.1590/atlas/'
_MOSAIC_DIR = os.path.join(_WORK_DIR, 'cutouts')
# CALIBRATION FROM GALEX COUNTS TO ABMAG
FUV2AB = 18.82
NUV2AB = 20.08
UV2AB = {'fuv': FUV2AB, 'nuv': NUV2AB}
GALEX_PIX_AS = 1.5 ## galex pixel scale in arcseconds -- from documentation
class GalaxyHeader(object):
def __init__(self, name, gal_dir, ra_ctr, dec_ctr, pix_len, pix_scale, factor=1):
self.name = name
self.gal_dir = gal_dir
self.ra_ctr = ra_ctr
self.dec_ctr = dec_ctr
self.hdr, self.hdrfile = self._create_hdr_output(pix_len, pix_scale, factor=1)
self.hdr_ext, self.hdrfile_ext = self._create_hdr_output(pix_len, pix_scale, factor=factor)
def _create_hdr_obj(self, pix_len, pix_scale):
"""
Create a FITS header
Parameters
----------
ra_ctr : float
RA of center of galaxy
dec_ctr : float
Dec of center of galaxy
pix_len : float
Length of each axis (square, so the same for x and y)
pix_scale : float
Pixel scale in degrees
Returns
-------
hdr : astropy Header() object
Newly created header object
"""
hdr = astropy.io.fits.Header()
hdr['NAXIS'] = 2
hdr['NAXIS1'] = pix_len
hdr['NAXIS2'] = pix_len
hdr['CTYPE1'] = 'RA---TAN'
hdr['CRVAL1'] = float(self.ra_ctr)
hdr['CRPIX1'] = (pix_len / 2.) * 1.
hdr['CDELT1'] = -1.0 * pix_scale
hdr['CTYPE2'] = 'DEC--TAN'
hdr['CRVAL2'] = float(self.dec_ctr)
hdr['CRPIX2'] = (pix_len / 2.) * 1.
hdr['CDELT2'] = pix_scale
hdr['EQUINOX'] = 2000
return hdr
def _create_hdr_output(self, size_degrees, pixel_scale, factor=1):
"""
Create a header and write it to an ascii file for use in Montage
Parameters
----------
galname : str
Name of the galaxy
ra_ctr : float
Central RA of galaxy
dec_ctr : float
Central Dec of galaxy
size_degrees : float
size of cutout, in degrees
pixel_scale : float
pixel scale of output in arcseconds per pixel
factor : int, optional
Number by which to multiply size_degrees to extend the size of the cutout for bg modeling. (Default: 1)
Returns
-------
target_hdr : astropy.header object
The output header object
header_file : str
Path to the ascii file containing the header information
"""
pix_len = int(np.ceil(size_degrees * factor / pixel_scale))
hdr = self._create_hdr_obj(pix_len, pixel_scale)
ri_targ, di_targ = self._make_axes(hdr)
sz_out = ri_targ.shape
outim = ri_targ * np.nan
prihdu = astropy.io.fits.PrimaryHDU(data=outim, header=hdr)
target_hdr = prihdu.header
suff = '_template.hdr'
if factor != 1:
suff = suff.replace('.hdr', '_ext.hdr')
header_file = os.path.join(self.gal_dir, self.name + suff)
self.write_headerfile(header_file, target_hdr)
return target_hdr, header_file
def _make_axes(self, hdr, quiet=False, novec=False, vonly=False, simple=False):
"""
Create axes arrays for the new mosaiced image. This is a simple translation to Python of Adam's
IDL routine of the same name.
Parameters
----------
hdr : FITS header object
FITS header to hold astrometry of desired output image
quiet : bool, optional
NOT USED
novec : bool
Find RA and Dec for every point (Default: False)
vonly : bool
Return only velocity data (Default: False)
simple : bool
Do the simplest thing (Default: False)
Returns
-------
rimg : array
array for ouptut RA
dimg : array
array for output Dec
"""
# PULL THE IMAGE/CUBE SIZES FROM THE HEADER
naxis = int(hdr['NAXIS'])
naxis1 = int(hdr['NAXIS1'])
naxis2 = int(hdr['NAXIS2'])
if naxis > 2:
naxis3 = hdr['NAXIS3']
## EXTRACT FITS ASTROMETRY STRUCTURE
ww = astropy.wcs.WCS(hdr)
#IF DATASET IS A CUBE THEN WE MAKE THE THIRD AXIS IN THE SIMPLEST WAY POSSIBLE (NO COMPLICATED ASTROMETRY WORRIES FOR FREQUENCY INFORMATION)
if naxis > 3:
#GRAB THE RELEVANT INFORMATION FROM THE ASTROMETRY HEADER
cd = ww.wcs.cd
crpix = ww.wcs.crpix
            cdelt = ww.wcs.cdelt
crval = ww.wcs.crval
if naxis > 2:
# MAKE THE VELOCITY AXIS (WILL BE M/S)
v = np.arange(naxis3) * 1.0
vdif = v - (hdr['CRPIX3']-1)
vaxis = (vdif * hdr['CDELT3'] + hdr['CRVAL3'])
# CUT OUT HERE IF WE ONLY WANT VELOCITY INFO
if vonly:
return vaxis
#IF 'SIMPLE' IS CALLED THEN DO THE REALLY TRIVIAL THING:
if simple:
            print('Using simple approach to make axes.')
print('BE SURE THIS IS WHAT YOU WANT! It probably is not.')
raxis = np.arange(naxis1) * 1.0
rdif = raxis - (hdr['CRPIX1'] - 1)
raxis = (rdif * hdr['CDELT1'] + hdr['CRVAL1'])
daxis = np.arange(naxis2) * 1.0
            ddif = daxis - (hdr['CRPIX2'] - 1)
            daxis = (ddif * hdr['CDELT2'] + hdr['CRVAL2'])
            rimg = np.outer(raxis, np.ones(naxis2))  # RA value of each row, replicated along the Dec axis
            dimg = np.outer(np.ones(naxis1), daxis)  # Dec value of each column, replicated along the RA axis
return rimg, dimg
# OBNOXIOUS SFL/GLS THING
glspos = ww.wcs.ctype[0].find('GLS')
if glspos != -1:
ctstr = ww.wcs.ctype[0]
newtype = 'SFL'
ctstr.replace('GLS', 'SFL')
ww.wcs.ctype[0] = ctstr
print('Replaced GLS with SFL; CTYPE1 now =' + ww.wcs.ctype[0])
glspos = ww.wcs.ctype[1].find('GLS')
if glspos != -1:
ctstr = ww.wcs.ctype[1]
newtype = 'SFL'
ctstr.replace('GLS', 'SFL')
ww.wcs.ctype[1] = ctstr
print('Replaced GLS with SFL; CTYPE2 now = ' + ww.wcs.ctype[1])
# CALL 'xy2ad' TO FIND THE RA AND DEC FOR EVERY POINT IN THE IMAGE
if novec:
rimg = np.zeros((naxis1, naxis2))
dimg = np.zeros((naxis1, naxis2))
for i in range(naxis1):
                j = np.arange(naxis2, dtype=float)
                pixcrd = np.array(zip([float(i)] * naxis2, j), np.float_)
                ra, dec = ww.all_pix2world(pixcrd, 1).T
                rimg[i, :] = ra
                dimg[i, :] = dec
else:
ximg = np.arange(naxis1) * 1.0
yimg = np.arange(naxis1) * 1.0
X, Y = np.meshgrid(ximg, yimg, indexing='xy')
ss = X.shape
xx, yy = X.flatten(), Y.flatten()
pixcrd = np.array(zip(xx, yy), np.float_)
img_new = ww.all_pix2world(pixcrd, 0)
rimg_new, dimg_new = img_new[:,0], img_new[:,1]
rimg = rimg_new.reshape(ss)
dimg = dimg_new.reshape(ss)
# GET AXES FROM THE IMAGES. USE THE CENTRAL COLUMN AND CENTRAL ROW
raxis = np.squeeze(rimg[:, naxis2/2])
daxis = np.squeeze(dimg[naxis1/2, :])
return rimg, dimg
def write_headerfile(self, header_file, header):
"""
Write out the header for the output mosaiced image
Parameters
----------
header_file : str
Path to file to which to write header
header : array
The header to which to write to ASCII file
"""
f = open(header_file, 'w')
for iii in range(len(header)):
outline = str(header[iii:iii+1]).strip().rstrip('END').strip()+'\n'
f.write(outline)
f.close()
def append2hdr(self, keyword=None, value=None, ext=False):
"""
Append information to the header and write to ASCII file
Parameters
----------
headerfile : str
The path to the ascii file containing the header information
keyword : str, optional
The keyword in the header that you want to create (Default: None)
value : multiple, optional
The value to apply to the keyword (Default: None)
"""
if keyword is not None:
if ext:
self.hdr_ext[keyword] = value
self.write_headerfile(self.hdrfile_ext, self.hdr_ext)
else:
self.hdr[keyword] = value
self.write_headerfile(self.hdrfile, self.hdr)
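# Minimal usage sketch (illustrative only; the name, coordinates, and cutout
# size below are made-up placeholders): build the standard and extended
# headers for a small cutout at the GALEX pixel scale in a scratch directory.
def _example_galaxy_header():
    import tempfile
    work_dir = tempfile.mkdtemp()
    gal_hdr = GalaxyHeader('PGC0000001', work_dir, ra_ctr=10.68, dec_ctr=41.27,
                           pix_len=0.05, pix_scale=GALEX_PIX_AS / 3600., factor=3)
    return gal_hdr.hdrfile, gal_hdr.hdrfile_ext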
def calc_tile_overlap(ra_ctr, dec_ctr, pad=0.0, min_ra=0., max_ra=180., min_dec=-90., max_dec=90.):
"""
Find all tiles that fall within a given overlap (pad) of (ra_ctr, dec_ctr)
Parameters
----------
ra_ctr : float
Central RA
dec_ctr : float
Central Dec
pad : float, optional
Size of region about center (Default: 0.0)
    min_ra : float, optional
Min RA of box to search in for overlaps (Default: 0.)
max_ra : float, optional
Max RA of box to search in (Default 180.)
min_dec : float, optional
Min Dec of box to search in (Default: -90.)
max_dec : float, optional
Max Dec of box to search in (Default: 90.)
Returns
-------
overlap : bool array
        Bool array indicating which tiles in the index file fall within the given region
"""
overlap = ((min_dec - pad) < dec_ctr) & ((max_dec + pad) > dec_ctr)
#TRAP HIGH LATITUDE CASE AND (I GUESS) TOSS BACK ALL TILES. DO BETTER LATER
mean_dec = (min_dec + max_dec) * 0.5
if np.abs(dec_ctr) + pad > 88.0:
return overlap
ra_pad = pad / np.cos(np.radians(mean_dec))
# MERIDIAN CASES
merid = np.where(max_ra < min_ra)
overlap[merid] = overlap[merid] & ( ((min_ra-ra_pad) < ra_ctr) | ((max_ra+ra_pad) > ra_ctr) )[merid]
# BORING CASE
normal = np.where(max_ra > min_ra)
overlap[normal] = overlap[normal] & ((((min_ra-ra_pad) < ra_ctr) & ((max_ra+ra_pad) > ra_ctr)))[normal]
return overlap
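# Illustrative example (made-up tile boundaries): find which of three tiles
# could contain a 0.5 degree cutout centered at (ra, dec) = (120.0, 10.0);
# the last tile straddles RA = 0 to exercise the meridian branch.
def _example_calc_tile_overlap():
    min_ra = np.array([118.0, 150.0, 359.0])
    max_ra = np.array([122.0, 155.0, 2.0])
    min_dec = np.array([8.0, 9.0, 9.0])
    max_dec = np.array([12.0, 11.0, 11.0])
    # returns array([True, False, False])
    return calc_tile_overlap(120.0, 10.0, pad=0.5, min_ra=min_ra, max_ra=max_ra,
                             min_dec=min_dec, max_dec=max_dec)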
def galex(band='fuv', ra_ctr=None, dec_ctr=None, size_deg=None, index=None, name=None, pgcname=None, model_bg=True, weight_ims=True, convert_mjysr=True, desired_pix_scale=GALEX_PIX_AS, imtype='intbgsub', wttype='rrhr'):
"""
Create cutouts of a galaxy in a single GALEX band.
Parameters
----------
band : str
GALEX band to use
ra_ctr : float
Central RA of galaxy
dec_ctr : float
Central Dec of galaxy
size_deg : float
Desired side length of each cutout, in degrees
index : array, optional
Structured array containing the galbase information. The default is to read it in inside this code. (Default: None)
name : str, optional
Name of the galaxy for which to generate a cutout
pgcname : str, optional
PGC name of the galaxy
model_bg : bool, optional
Model the background of the mosaiced image (Default: False)
weight_ims : bool, optional
weight the input images with the weights images
convert_mjysr : bool, optional
convert input images from counts/sec to MJy/sr
desired_pix_scale : float, optional
Desired pixel scale of output image. Default is currently set to GALEX pixel scale (Default: 1.5)
imtype : str, optional
input image type to use from galex (Default: int)
wttype : str, optional
input weights image type to use from galex (Default: rrhr)
"""
ttype = 'galex'
data_dir = os.path.join(_TOP_DIR, ttype, 'sorted_tiles')
problem_file = os.path.join(_WORK_DIR, 'problem_galaxies_{}.txt'.format(band))
numbers_file = os.path.join(_WORK_DIR, 'gal_reproj_info_{}.txt'.format(band))
galaxy_mosaic_file = os.path.join(_MOSAIC_DIR, '_'.join([pgcname, band]).upper() + '.FITS')
if not os.path.exists(galaxy_mosaic_file):
start_time = time.time()
print pgcname, band.upper()
# READ THE INDEX FILE (IF NOT PASSED IN)
if index is None:
indexfile = os.path.join(_INDEX_DIR, 'galex_index_file.fits')
ext = 1
index, hdr = astropy.io.fits.getdata(indexfile, ext, header=True)
# CALCULATE TILE OVERLAP
tile_overlaps = calc_tile_overlap(ra_ctr, dec_ctr, pad=size_deg,
min_ra=index['MIN_RA'],
max_ra=index['MAX_RA'],
min_dec=index['MIN_DEC'],
max_dec=index['MAX_DEC'])
# FIND OVERLAPPING TILES WITH RIGHT BAND
# index file set up such that index['fuv'] = 1 where fuv and
# index['nuv'] = 1 where nuv
ind = np.where((index[band]) & tile_overlaps)
# MAKE SURE THERE ARE OVERLAPPING TILES
ct_overlap = len(ind[0])
if ct_overlap == 0:
with open(problem_file, 'a') as myfile:
myfile.write(pgcname + ': ' + 'No overlapping tiles\n')
return
        pix_scale = desired_pix_scale / 3600.  # convert the output pixel scale from arcsec per pixel to degrees per pixel
try:
# CREATE NEW TEMP DIRECTORY TO STORE TEMPORARY FILES
gal_dir = os.path.join(_WORK_DIR, '_'.join([pgcname, band]).upper())
os.makedirs(gal_dir)
# MAKE HEADER AND EXTENDED HEADER AND WRITE TO FILE
gal_hdr = GalaxyHeader(pgcname, gal_dir, ra_ctr, dec_ctr, size_deg, pix_scale, factor=3)
# GATHER THE INPUT FILES
input_dir = os.path.join(gal_dir, 'input')
if not os.path.exists(input_dir):
os.makedirs(input_dir)
nfiles = get_input(index, ind, data_dir, input_dir, hdr=gal_hdr)
im_dir, wt_dir = input_dir, input_dir
# WRITE TABLE OF INPUT IMAGE INFORMATION
input_table = os.path.join(im_dir, 'input.tbl')
montage.mImgtbl(im_dir, input_table, corners=True)
if convert_mjysr:
converted_dir = os.path.join(gal_dir, 'converted')
if not os.path.exists(converted_dir):
os.makedirs(converted_dir)
convert_to_flux_input(im_dir, converted_dir, band, desired_pix_scale, imtype=imtype)
im_dir = converted_dir
# MASK IMAGES
masked_dir = os.path.join(gal_dir, 'masked')
im_masked_dir = os.path.join(masked_dir, imtype)
wt_masked_dir = os.path.join(masked_dir, wttype)
for outdir in [masked_dir, im_masked_dir, wt_masked_dir]:
os.makedirs(outdir)
mask_images(im_dir, wt_dir, im_masked_dir, wt_masked_dir, imtype=imtype, wttype=wttype)
im_dir = im_masked_dir
wt_dir = wt_masked_dir
# REPROJECT IMAGES WITH EXTENDED HEADER
reprojected_dir = os.path.join(gal_dir, 'reprojected')
reproj_im_dir = os.path.join(reprojected_dir, imtype)
reproj_wt_dir = os.path.join(reprojected_dir, wttype)
for outdir in [reprojected_dir, reproj_im_dir, reproj_wt_dir]:
os.makedirs(outdir)
reproject_images(gal_hdr.hdrfile_ext, im_dir, reproj_im_dir, imtype)
reproject_images(gal_hdr.hdrfile_ext, wt_dir, reproj_wt_dir, wttype)
im_dir = reproj_im_dir
wt_dir = reproj_wt_dir
# MODEL THE BACKGROUND IN THE IMAGE FILES WITH THE EXTENDED HEADER
if model_bg:
bg_model_dir = os.path.join(gal_dir, 'background_model')
diff_dir = os.path.join(bg_model_dir, 'differences')
corr_dir = os.path.join(bg_model_dir, 'corrected')
for outdir in [bg_model_dir, diff_dir, corr_dir]:
os.makedirs(outdir)
bg_model(im_dir, bg_model_dir, diff_dir, corr_dir, gal_hdr.hdrfile_ext, im_type=imtype, level_only=False)
im_dir = os.path.join(corr_dir, imtype)
# WEIGHT IMAGES
if weight_ims:
weight_dir = os.path.join(gal_dir, 'weighted')
im_weight_dir = os.path.join(weight_dir, imtype)
wt_weight_dir = os.path.join(weight_dir, wttype)
for outdir in [weight_dir, im_weight_dir, wt_weight_dir]:
os.makedirs(outdir)
weight_images(im_dir, wt_dir, weight_dir, im_weight_dir, wt_weight_dir, imtype=imtype, wttype=wttype)
im_dir = im_weight_dir
wt_dir = wt_weight_dir
# CREATE THE METADATA TABLES NEEDED FOR COADDITION
weight_table = create_table(wt_dir, dir_type=wttype)
weighted_table = create_table(im_dir, dir_type=imtype)
# COADD THE REPROJECTED, WEIGHTED IMAGES AND THE WEIGHT IMAGES WITH THE REGULAR HEADER FILE
penultimate_dir = os.path.join(gal_dir, 'large_mosaic')
final_dir = os.path.join(gal_dir, 'mosaic')
for outdir in [penultimate_dir, final_dir]:
os.makedirs(outdir)
coadd(gal_hdr.hdrfile, penultimate_dir, im_dir, output=imtype, add_type='mean')
coadd(gal_hdr.hdrfile, penultimate_dir, wt_dir, output=wttype, add_type='mean')
# DIVIDE OUT THE WEIGHTS AND CONVERT TO MJY/SR
imagefile, wtfile = finish_weight(penultimate_dir, imtype=imtype, wttype=wttype)
# COPY IMAGE AND WEIGHTS MOSAIC TO FINAL DIRECTORY
outfile = os.path.join(final_dir, 'final_mosaic.fits')
shutil.copy(imagefile, outfile)
shutil.copy(wtfile, os.path.join(final_dir, '{}_mosaic.fits'.format(wttype)))
# COPY MOSAIC FILES TO CUTOUTS DIRECTORY
mosaic_file = os.path.join(final_dir, 'final_mosaic.fits')
weight_file = os.path.join(final_dir, '{}_mosaic.fits'.format(wttype))
newsuffs = ['.FITS', '_weight.FITS']
oldfiles = [mosaic_file, weight_file]
newfiles = ['_'.join([pgcname, band]).upper() + s for s in newsuffs]
for files in zip(oldfiles, newfiles):
shutil.copy(files[0], os.path.join(_MOSAIC_DIR, files[1]))
# REMOVE TEMP GALAXY DIRECTORY AND EXTRA FILES
shutil.rmtree(gal_dir, ignore_errors=True)
# NOTE TIME TO FINISH
stop_time = time.time()
total_time = (stop_time - start_time) / 60.
# WRITE OUT THE NUMBER OF TILES THAT OVERLAP THE GIVEN GALAXY
out_arr = [pgcname, band.upper(), nfiles, np.around(total_time, 2)]
with open(numbers_file, 'a') as nfile:
nfile.write('{0: >10}'.format(out_arr[0]))
nfile.write('{0: >6}'.format(out_arr[1]))
nfile.write('{0: >6}'.format(out_arr[2]))
nfile.write('{0: >6}'.format(out_arr[3]) + '\n')
# SOMETHING WENT WRONG -- WRITE ERROR TO FILE
except Exception as inst:
me = sys.exc_info()[0]
with open(problem_file, 'a') as myfile:
myfile.write(pgcname + ': ' + str(me) + ': '+str(inst)+'\n')
shutil.rmtree(gal_dir, ignore_errors=True)
return
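# Example call (illustrative only: the coordinates, size, and names below are
# placeholders, and the hard-coded _TOP_DIR/_WORK_DIR paths and GALEX index
# file must exist for this to run):
#   galex(band='fuv', ra_ctr=24.174, dec_ctr=15.783, size_deg=0.2,
#         name='NGC0628', pgcname='PGC005974')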
def get_input(index, ind, data_dir, input_dir, hdr=None):
"""
Gather the input files for creating mosaics and copy them into a temporary directory
Parameters
----------
index : np.array
structured array from galbase FITS file
ind : np.array(np.dtype(int))
An array of indices into the index locating the correct files
data_dir : str
Path to location of raw data downloaded from GALEX (or other) server
input_dir : str
Path to newly created temporary directory for storing temp files used in mosaicing
Returns
-------
len(input_files) : int
The number of files that will go into the mosaic.
"""
infiles = index[ind[0]]['fname']
wtfiles = index[ind[0]]['rrhrfile']
flgfiles = index[ind[0]]['flagfile']
infiles = [os.path.join(data_dir, f) for f in infiles]
wtfiles = [os.path.join(data_dir, f) for f in wtfiles]
flgfiles = [os.path.join(data_dir, f) for f in flgfiles]
for i, infile in enumerate(infiles):
basename = os.path.basename(infile)
new_in_file = os.path.join(input_dir, basename)
os.symlink(infile, new_in_file)
if hdr is not None:
keyw = 'INFILE{}'.format(str(i+1).zfill(2))
hdr.append2hdr(keyword=keyw, value=basename, ext=False)
for wtfile in wtfiles:
basename = os.path.basename(wtfile)
new_wt_file = os.path.join(input_dir, basename)
os.symlink(wtfile, new_wt_file)
for flgfile in flgfiles:
basename = os.path.basename(flgfile)
new_flg_file = os.path.join(input_dir, basename)
os.symlink(flgfile, new_flg_file)
return len(infiles)
def mask_images(im_dir, wt_dir, im_masked_dir, wt_masked_dir, imtype='intbgsub', wttype='rrhr'):
"""
Mask pixels in the input images
Parameters
----------
im_dir : str
Path to directory containing the images
wt_dir : str
Path to directory containing the weights
im_masked_dir : str
Path to temp directory for this galaxy in which to store masked image files
wt_masked_dir : str
Path to temp directory for this galaxy in which to store masked weight files
"""
int_suff, rrhr_suff = '*-{}.fits'.format(imtype), '*-{}.fits'.format(wttype)
int_images = sorted(glob.glob(os.path.join(im_dir, int_suff)))
rrhr_images = sorted(glob.glob(os.path.join(wt_dir, rrhr_suff)))
for i in range(len(int_images)):
image_infile = int_images[i]
wt_infile = rrhr_images[i]
image_outfile = os.path.join(im_masked_dir, os.path.basename(image_infile))
wt_outfile = os.path.join(wt_masked_dir, os.path.basename(wt_infile))
mask_galex(image_infile, wt_infile, image_outfile, wt_outfile)
def mask_galex(intfile, wtfile, out_intfile, out_wtfile, chip_rad=1400, chip_x0=1920, chip_y0=1920):
"""
The actual masking routine. Selects pixels that are close to the edges of the chips
or that have bad values, and masks them.
Parameters
----------
intfile : str
input image file
wtfile : str
input weight file
chip_rad : int
Radius of the GALEX chip to use. The actual radius of data is ~1500 pixels. There are known edge effects. (Default: 1400)
chip_x0 : int
Center of GALEX chip on the x-axis (Default: 1920)
chip_y0 : int
Center of GALEX chip on the y-axis (Default: 1920)
out_intfile : str, optional
Path to output, masked image file. If not included, it will default to replacing the input file name as
'.fits' --> '_masked.fits' (Default: None)
out_wtfile : str, optional
Path to output, masked weight file. If not included, it will default to replacing the input file name as
'.fits' --> '_masked.fits' (Default: None)
"""
if not os.path.exists(out_intfile):
# read in the data
data, hdr = astropy.io.fits.getdata(intfile, header=True)
wt, whdr = astropy.io.fits.getdata(wtfile, header=True)
# determine distance of each pixel from the center
x = np.arange(data.shape[1]).reshape(1, -1) + 1
y = np.arange(data.shape[0]).reshape(-1, 1) + 1
r = np.sqrt((x - chip_x0)**2 + (y - chip_y0)**2)
# make pixel selections for masking
i = (r > chip_rad)
j = (wt == -1.1e30)
        # mask pixels that meet either of the above criteria
data = np.where(i | j, np.nan, data) #0
wt = np.where(i | j, np.nan, wt) #1e-20
# write new data to file
astropy.io.fits.writeto(out_intfile, data, hdr)
astropy.io.fits.writeto(out_wtfile, wt, whdr)
def reproject_images(template_header, input_dir, reproj_dir, imtype, whole=True, exact=True, corners=True, img_list=None):
"""
Reproject input images to a new WCS as given by a template header
Parameters
----------
template_header : ascii file
ASCII file containing the WCS to which you want to reproject. This is what Montage requires.
input_dir : str
Path to directory containing input data
    reproj_dir : str
Path to new directory for storing reprojected data
imtype : str
The type of image you are reprojecting; one of [int, rrhr]
whole : bool, optional
Montage argument: Force reprojection of whole images, even if they exceed the area of the FITS
header template (Default: True)
exact : bool, optional
Montage argument: Flag indicating output image should exactly match the FITS header template,
and not crop off blank pixels (Default: True)
corners : bool, optional
Montage argument: Adds 8 columns for the RA and Dec of the image corners to the output metadata table
(Default: True)
img_list : list of strs, optional
Montage argument: only process files with names specified in table img_list, ignoring any other files
in the directory. (Default: None)
"""
# get image metadata from input images
input_table = os.path.join(input_dir, imtype + '_input.tbl')
montage.mImgtbl(input_dir, input_table, corners=corners, img_list=img_list)
# Create reprojection directory, reproject, and get image metadata
stats_table = os.path.join(reproj_dir, imtype+'_mProjExec_stats.log')
montage.mProjExec(input_table, template_header, reproj_dir, stats_table, raw_dir=input_dir,
whole=whole, exact=exact)
reprojected_table = os.path.join(reproj_dir, imtype + '_reprojected.tbl')
montage.mImgtbl(reproj_dir, reprojected_table, corners=corners)
def bg_model(reprojected_dir, bg_model_dir, diff_dir, corr_dir, template_header, im_type='intbgsub', level_only=True):
"""
Model the background for the mosaiced image
Parameters
----------
reprojected_dir : str
Path to temp directory containing reprojected images
bg_model_dir : str
Path to directory inside gal_dir to hold the background modeling information
diff_dir : str
Path to directory inside bg_model_dir to hold the difference images
corr_dir : str
Path to directory inside bg_model_dir to hold the background corrected images
template_header : ascii file
Path to file containing the WCS to which we want to reproject our images
im_type : str
Type of image used (Default: intbgsub)
level_only : bool, optional
Montage argument: Adjust background levels only, don't try to fit the slope (Default: True)
"""
# FIND OVERLAPS
diff_dir = os.path.join(diff_dir, im_type)
os.makedirs(diff_dir)
reprojected_table = os.path.join(reprojected_dir, im_type + '_reprojected.tbl')
diffs_table = os.path.join(diff_dir, 'differences.tbl')
montage.mOverlaps(reprojected_table, diffs_table)
# CALCULATE DIFFERENCES BETWEEN OVERLAPPING IMAGES
montage.mDiffExec(diffs_table, template_header, diff_dir,
proj_dir=reprojected_dir)
# BEST-FIT PLANE COEFFICIENTS
fits_table = os.path.join(diff_dir, 'fits.tbl')
montage.mFitExec(diffs_table, fits_table, diff_dir)
# CALCULATE CORRECTIONS
corr_dir = os.path.join(corr_dir, im_type)
os.makedirs(corr_dir)
corrections_table = os.path.join(corr_dir, 'corrections.tbl')
montage.mBgModel(reprojected_table, fits_table, corrections_table,
level_only=level_only)
# APPLY CORRECTIONS
montage.mBgExec(reprojected_table, corrections_table, corr_dir,
proj_dir=reprojected_dir)
def weight_images(im_dir, wt_dir, weight_dir, im_weight_dir, wt_weight_dir, imtype='intbgsub', wttype='rrhr'):
"""
Weight the input images by a set of weights images
Parameters
----------
im_dir : str
Path to directory containing the images
wt_dir : str
Path to directory containing the weights
weight_dir : str
Path to directory for the newly weighted images
im_weight_dir : str
Path to subdirectory containing the weighted images
wt_weight_dir : str
Path to subdirectory containgn the weights images (same as before, they haven't changed)
imtype : str, optional
Type of input image used (Default: intbgsub)
wttype : str, optional
        Type of weight image used (Default: rrhr)
"""
im_suff, wt_suff = '*-{}.fits'.format(imtype), '*-{}.fits'.format(wttype)
imfiles = sorted(glob.glob(os.path.join(im_dir, im_suff)))
wtfiles = sorted(glob.glob(os.path.join(wt_dir, wt_suff)))
# weight each image
for i in range(len(imfiles)):
# read in the data
imfile = imfiles[i]
wtfile = os.path.join(os.path.dirname(wtfiles[i]), os.path.basename(imfile).replace(imtype, wttype))
im, hdr = astropy.io.fits.getdata(imfile, header=True)
rrhr, rrhrhdr = astropy.io.fits.getdata(wtfile, header=True)
# weight the data by the exposure time
wt = rrhr
newim = im * wt
# write data to new files and copy the *_area.fits files created by Montage to have the same naming convention
newfile = os.path.join(im_weight_dir, os.path.basename(imfile))
astropy.io.fits.writeto(newfile, newim, hdr)
old_area_file = imfile.replace('.fits', '_area.fits')
if os.path.exists(old_area_file):
new_area_file = newfile.replace('.fits', '_area.fits')
shutil.copy(old_area_file, new_area_file)
weightfile = os.path.join(wt_weight_dir, os.path.basename(wtfile))
astropy.io.fits.writeto(weightfile, wt, rrhrhdr)
old_area_file = wtfile.replace('.fits', '_area.fits')
if os.path.exists(old_area_file):
new_area_file = weightfile.replace('.fits', '_area.fits')
shutil.copy(old_area_file, new_area_file)
def create_table(in_dir, dir_type=None):
"""
Create a metadata table using Montage for all the files in a given directory
Parameters
----------
in_dir : str
Path to directory containing the files
dir_type : str, optional
type of file you are creating a table for, e.g., 'intbgsub, rrhr, wt' (Default: None)
Returns
-------
reprojected_table
Path to the table containing the metadata
"""
if dir_type is None:
reprojected_table = os.path.join(in_dir, 'reprojected.tbl')
else:
reprojected_table = os.path.join(in_dir, dir_type + '_reprojected.tbl')
montage.mImgtbl(in_dir, reprojected_table, corners=True)
return reprojected_table
def coadd(template_header, output_dir, input_dir, output=None, add_type=None):
"""
Coadd input images to create mosaic.
Parameters
----------
template_header : ascii file
File containing new WCS
output_dir : str
        Path to directory containing the output image
    input_dir : str
        Path to directory containing the input images
output : str, optional
Type of mosaic you're making: e.g., int, wt, count (Default: None)
add_type : str, optional
Montage argument -- type of coadding to perform (Default: None -- defaults to Montage's default)
"""
img_dir = input_dir
# output is either 'weights' or 'int'
if output is None:
reprojected_table = os.path.join(img_dir, 'reprojected.tbl')
out_image = os.path.join(output_dir, 'mosaic.fits')
else:
reprojected_table = os.path.join(img_dir, output + '_reprojected.tbl')
out_image = os.path.join(output_dir, output + '_mosaic.fits')
montage.mAdd(reprojected_table, template_header, out_image, img_dir=img_dir, exact=True, type=add_type)
def finish_weight(output_dir, imtype='intbgsub', wttype='rrhr'):
"""
Divide out the weights from the final image to get back to flux density units
Parameters
----------
output_dir : str
Path to directory containing the output image
Returns
-------
newfile : str
Path to new, mosaiced file
"""
image_file = os.path.join(output_dir, '{}_mosaic.fits'.format(imtype))
wt_file = os.path.join(output_dir, '{}_mosaic.fits'.format(wttype))
im, hdr = astropy.io.fits.getdata(image_file, header=True)
wt = astropy.io.fits.getdata(wt_file)
newim = im / wt
newfile = os.path.join(output_dir, 'image_mosaic.fits')
astropy.io.fits.writeto(newfile, newim, hdr)
return newfile, wt_file
def counts2jy_galex(counts, cal, pix_as):
"""
Convert GALEX counts/s to MJy/sr
Parameters
----------
counts : float
Array containing counts data to be converted
cal : float
Calibration value from counts to AB mag for desired band (FUV or NUV)
pix_as : float
Pixel scale in arcseconds
Returns
-------
val : float
Count rate converted to MJy/sr
"""
# first convert to abmag
abmag = -2.5 * np.log10(counts) + cal
# then convert to Jy
f_nu = 10**(abmag/-2.5) * 3631.
# then to MJy
f_nu *= 1e-6
# then to MJy/sr
    pix_rad = np.radians(pix_as / 3600.) # pixel scale converted from arcsec to radians
pix_sr = pix_rad ** 2. # pixel scale converted from radians to steradians
val = f_nu / pix_sr
return val
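# Worked example (illustrative only) for 1 count/s in the FUV at the 1.5"
# GALEX pixel scale:
#   abmag  = -2.5*log10(1) + 18.82 = 18.82
#   f_nu   = 10**(18.82 / -2.5) * 3631 Jy ~ 1.08e-4 Jy = 1.08e-10 MJy
#   pix_sr = (1.5" in radians)**2 ~ 5.3e-11 sr
#   val    = counts2jy_galex(1.0, FUV2AB, GALEX_PIX_AS) ~ 2.0 MJy/sr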
def convert_to_flux_input(indir, outdir, band, pix_as, imtype='intbgsub'):
infiles = sorted(glob.glob(os.path.join(indir, '*-{}.fits'.format(imtype))))
for infile in infiles:
data, hdr = astropy.io.fits.getdata(infile, header=True)
newdata = counts2jy_galex(data, UV2AB[band.lower()], pix_as)
hdr['BUNIT'] = 'MJY/SR'
outfile = os.path.join(outdir, os.path.basename(infile))
astropy.io.fits.writeto(outfile, newdata, hdr)
def convert_to_flux_final(mosaicfile, band, pix_as):
data, hdr = astropy.io.fits.getdata(mosaicfile, header=True)
newim = counts2jy_galex(data, UV2AB[band.lower()], pix_as)
# APPEND UNIT INFORMATION TO NEW HEADER AND WRITE OUT HEADER FILE
hdr['BUNIT'] = 'MJY/SR' #gal_hdr.append2hdr(keyword='BUNIT', value='MJY/SR', ext=False)
newfile = os.path.join(os.path.dirname(mosaicfile), 'image_mosaic_mjysr.fits')
astropy.io.fits.writeto(newfile, newim, hdr)
| mit |
kyleam/seaborn | seaborn/linearmodels.py | 7 | 57401 | """Plotting functions for linear models (broadly construed)."""
from __future__ import division
import copy
import itertools
from textwrap import dedent
import numpy as np
import pandas as pd
from scipy.spatial import distance
import matplotlib as mpl
import matplotlib.pyplot as plt
import warnings
try:
import statsmodels
assert statsmodels
_has_statsmodels = True
except ImportError:
_has_statsmodels = False
from .external.six import string_types
from .external.six.moves import range
from . import utils
from . import algorithms as algo
from .palettes import color_palette
from .axisgrid import FacetGrid, PairGrid, _facet_docs
from .distributions import kdeplot
class _LinearPlotter(object):
"""Base class for plotting relational data in tidy format.
To get anything useful done you'll have to inherit from this, but setup
code that can be abstracted out should be put here.
"""
def establish_variables(self, data, **kws):
"""Extract variables from data or use directly."""
self.data = data
# Validate the inputs
any_strings = any([isinstance(v, string_types) for v in kws.values()])
if any_strings and data is None:
raise ValueError("Must pass `data` if using named variables.")
# Set the variables
for var, val in kws.items():
if isinstance(val, string_types):
setattr(self, var, data[val])
else:
setattr(self, var, val)
def dropna(self, *vars):
"""Remove observations with missing data."""
vals = [getattr(self, var) for var in vars]
vals = [v for v in vals if v is not None]
not_na = np.all(np.column_stack([pd.notnull(v) for v in vals]), axis=1)
for var in vars:
val = getattr(self, var)
if val is not None:
setattr(self, var, val[not_na])
def plot(self, ax):
raise NotImplementedError
class _RegressionPlotter(_LinearPlotter):
"""Plotter for numeric independent variables with regression model.
This does the computations and drawing for the `regplot` function, and
is thus also used indirectly by `lmplot`.
"""
def __init__(self, x, y, data=None, x_estimator=None, x_bins=None,
x_ci="ci", scatter=True, fit_reg=True, ci=95, n_boot=1000,
units=None, order=1, logistic=False, lowess=False,
robust=False, logx=False, x_partial=None, y_partial=None,
truncate=False, dropna=True, x_jitter=None, y_jitter=None,
color=None, label=None):
# Set member attributes
self.x_estimator = x_estimator
self.ci = ci
self.x_ci = ci if x_ci == "ci" else x_ci
self.n_boot = n_boot
self.scatter = scatter
self.fit_reg = fit_reg
self.order = order
self.logistic = logistic
self.lowess = lowess
self.robust = robust
self.logx = logx
self.truncate = truncate
self.x_jitter = x_jitter
self.y_jitter = y_jitter
self.color = color
self.label = label
# Validate the regression options:
if sum((order > 1, logistic, robust, lowess, logx)) > 1:
raise ValueError("Mutually exclusive regression options.")
# Extract the data vals from the arguments or passed dataframe
self.establish_variables(data, x=x, y=y, units=units,
x_partial=x_partial, y_partial=y_partial)
# Drop null observations
if dropna:
self.dropna("x", "y", "units", "x_partial", "y_partial")
# Regress nuisance variables out of the data
if self.x_partial is not None:
self.x = self.regress_out(self.x, self.x_partial)
if self.y_partial is not None:
self.y = self.regress_out(self.y, self.y_partial)
# Possibly bin the predictor variable, which implies a point estimate
if x_bins is not None:
self.x_estimator = np.mean if x_estimator is None else x_estimator
x_discrete, x_bins = self.bin_predictor(x_bins)
self.x_discrete = x_discrete
else:
self.x_discrete = self.x
# Save the range of the x variable for the grid later
self.x_range = self.x.min(), self.x.max()
@property
def scatter_data(self):
"""Data where each observation is a point."""
x_j = self.x_jitter
if x_j is None:
x = self.x
else:
x = self.x + np.random.uniform(-x_j, x_j, len(self.x))
y_j = self.y_jitter
if y_j is None:
y = self.y
else:
y = self.y + np.random.uniform(-y_j, y_j, len(self.y))
return x, y
@property
def estimate_data(self):
"""Data with a point estimate and CI for each discrete x value."""
x, y = self.x_discrete, self.y
vals = sorted(np.unique(x))
points, cis = [], []
for val in vals:
# Get the point estimate of the y variable
_y = y[x == val]
est = self.x_estimator(_y)
points.append(est)
# Compute the confidence interval for this estimate
if self.x_ci is None:
cis.append(None)
else:
units = None
if self.units is not None:
units = self.units[x == val]
boots = algo.bootstrap(_y, func=self.x_estimator,
n_boot=self.n_boot, units=units)
_ci = utils.ci(boots, self.x_ci)
cis.append(_ci)
return vals, points, cis
def fit_regression(self, ax=None, x_range=None, grid=None):
"""Fit the regression model."""
# Create the grid for the regression
if grid is None:
if self.truncate:
x_min, x_max = self.x_range
else:
if ax is None:
x_min, x_max = x_range
else:
x_min, x_max = ax.get_xlim()
grid = np.linspace(x_min, x_max, 100)
ci = self.ci
# Fit the regression
if self.order > 1:
yhat, yhat_boots = self.fit_poly(grid, self.order)
elif self.logistic:
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod.families import Binomial
yhat, yhat_boots = self.fit_statsmodels(grid, GLM,
family=Binomial())
elif self.lowess:
ci = None
grid, yhat = self.fit_lowess()
elif self.robust:
from statsmodels.robust.robust_linear_model import RLM
yhat, yhat_boots = self.fit_statsmodels(grid, RLM)
elif self.logx:
yhat, yhat_boots = self.fit_logx(grid)
else:
yhat, yhat_boots = self.fit_fast(grid)
# Compute the confidence interval at each grid point
if ci is None:
err_bands = None
else:
err_bands = utils.ci(yhat_boots, ci, axis=0)
return grid, yhat, err_bands
def fit_fast(self, grid):
"""Low-level regression and prediction using linear algebra."""
X, y = np.c_[np.ones(len(self.x)), self.x], self.y
grid = np.c_[np.ones(len(grid)), grid]
reg_func = lambda _x, _y: np.linalg.pinv(_x).dot(_y)
yhat = grid.dot(reg_func(X, y))
if self.ci is None:
return yhat, None
beta_boots = algo.bootstrap(X, y, func=reg_func,
n_boot=self.n_boot, units=self.units).T
yhat_boots = grid.dot(beta_boots).T
return yhat, yhat_boots
def fit_poly(self, grid, order):
"""Regression using numpy polyfit for higher-order trends."""
x, y = self.x, self.y
reg_func = lambda _x, _y: np.polyval(np.polyfit(_x, _y, order), grid)
yhat = reg_func(x, y)
if self.ci is None:
return yhat, None
yhat_boots = algo.bootstrap(x, y, func=reg_func,
n_boot=self.n_boot, units=self.units)
return yhat, yhat_boots
def fit_statsmodels(self, grid, model, **kwargs):
"""More general regression function using statsmodels objects."""
X, y = np.c_[np.ones(len(self.x)), self.x], self.y
grid = np.c_[np.ones(len(grid)), grid]
reg_func = lambda _x, _y: model(_y, _x, **kwargs).fit().predict(grid)
yhat = reg_func(X, y)
if self.ci is None:
return yhat, None
yhat_boots = algo.bootstrap(X, y, func=reg_func,
n_boot=self.n_boot, units=self.units)
return yhat, yhat_boots
def fit_lowess(self):
"""Fit a locally-weighted regression, which returns its own grid."""
from statsmodels.nonparametric.smoothers_lowess import lowess
grid, yhat = lowess(self.y, self.x).T
return grid, yhat
def fit_logx(self, grid):
"""Fit the model in log-space."""
X, y = np.c_[np.ones(len(self.x)), self.x], self.y
grid = np.c_[np.ones(len(grid)), np.log(grid)]
def reg_func(_x, _y):
_x = np.c_[_x[:, 0], np.log(_x[:, 1])]
return np.linalg.pinv(_x).dot(_y)
yhat = grid.dot(reg_func(X, y))
if self.ci is None:
return yhat, None
beta_boots = algo.bootstrap(X, y, func=reg_func,
n_boot=self.n_boot, units=self.units).T
yhat_boots = grid.dot(beta_boots).T
return yhat, yhat_boots
def bin_predictor(self, bins):
"""Discretize a predictor by assigning value to closest bin."""
x = self.x
if np.isscalar(bins):
percentiles = np.linspace(0, 100, bins + 2)[1:-1]
bins = np.c_[utils.percentiles(x, percentiles)]
else:
bins = np.c_[np.ravel(bins)]
dist = distance.cdist(np.c_[x], bins)
x_binned = bins[np.argmin(dist, axis=1)].ravel()
return x_binned, bins.ravel()
def regress_out(self, a, b):
"""Regress b from a keeping a's original mean."""
a_mean = a.mean()
a = a - a_mean
b = b - b.mean()
b = np.c_[b]
a_prime = a - b.dot(np.linalg.pinv(b).dot(a))
return (a_prime + a_mean).reshape(a.shape)
def plot(self, ax, scatter_kws, line_kws):
"""Draw the full plot."""
# Insert the plot label into the correct set of keyword arguments
if self.scatter:
scatter_kws["label"] = self.label
else:
line_kws["label"] = self.label
# Use the current color cycle state as a default
if self.color is None:
lines, = plt.plot(self.x.mean(), self.y.mean())
color = lines.get_color()
lines.remove()
else:
color = self.color
# Let color in keyword arguments override overall plot color
scatter_kws.setdefault("color", color)
line_kws.setdefault("color", color)
# Draw the constituent plots
if self.scatter:
self.scatterplot(ax, scatter_kws)
if self.fit_reg:
self.lineplot(ax, line_kws)
# Label the axes
if hasattr(self.x, "name"):
ax.set_xlabel(self.x.name)
if hasattr(self.y, "name"):
ax.set_ylabel(self.y.name)
def scatterplot(self, ax, kws):
"""Draw the data."""
# Treat the line-based markers specially, explicitly setting larger
# linewidth than is provided by the seaborn style defaults.
# This would ideally be handled better in matplotlib (i.e., distinguish
        # between edgewidth for solid glyphs and linewidth for line glyphs),
# but this should do for now.
line_markers = ["1", "2", "3", "4", "+", "x", "|", "_"]
if self.x_estimator is None:
if "marker" in kws and kws["marker"] in line_markers:
lw = mpl.rcParams["lines.linewidth"]
else:
lw = mpl.rcParams["lines.markeredgewidth"]
kws.setdefault("linewidths", lw)
if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:
kws.setdefault("alpha", .8)
x, y = self.scatter_data
ax.scatter(x, y, **kws)
else:
# TODO abstraction
ci_kws = {"color": kws["color"]}
ci_kws["linewidth"] = mpl.rcParams["lines.linewidth"] * 1.75
kws.setdefault("s", 50)
xs, ys, cis = self.estimate_data
if [ci for ci in cis if ci is not None]:
for x, ci in zip(xs, cis):
ax.plot([x, x], ci, **ci_kws)
ax.scatter(xs, ys, **kws)
def lineplot(self, ax, kws):
"""Draw the model."""
xlim = ax.get_xlim()
# Fit the regression model
grid, yhat, err_bands = self.fit_regression(ax)
        # Get and set the default aesthetics
fill_color = kws["color"]
lw = kws.pop("lw", mpl.rcParams["lines.linewidth"] * 1.5)
kws.setdefault("linewidth", lw)
# Draw the regression line and confidence interval
ax.plot(grid, yhat, **kws)
if err_bands is not None:
ax.fill_between(grid, *err_bands, color=fill_color, alpha=.15)
ax.set_xlim(*xlim)
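# Illustrative sketch (not part of the seaborn API): the "fast" fit used by
# _RegressionPlotter.fit_fast above is an ordinary least-squares line solved
# with a pseudo-inverse, so it should match a first-degree ``numpy.polyfit``.
def _example_fast_fit_sketch():
    rs = np.random.RandomState(0)
    x = np.linspace(0, 10, 50)
    y = 2.0 * x + 1.0 + rs.normal(size=50)
    design = np.c_[np.ones(len(x)), x]
    beta = np.linalg.pinv(design).dot(y)      # [intercept, slope]
    slope, intercept = np.polyfit(x, y, 1)    # same fit, coefficients reversed
    assert np.allclose(beta, [intercept, slope])
    return beta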
_regression_docs = dict(
model_api=dedent("""\
There are a number of mutually exclusive options for estimating the
regression model: ``order``, ``logistic``, ``lowess``, ``robust``, and
``logx``. See the parameter docs for more information on these options.\
"""),
regplot_vs_lmplot=dedent("""\
Understanding the difference between :func:`regplot` and :func:`lmplot` can
be a bit tricky. In fact, they are closely related, as :func:`lmplot` uses
:func:`regplot` internally and takes most of its parameters. However,
:func:`regplot` is an axes-level function, so it draws directly onto an
axes (either the currently active axes or the one provided by the ``ax``
parameter), while :func:`lmplot` is a figure-level function and creates its
own figure, which is managed through a :class:`FacetGrid`. This has a few
consequences, namely that :func:`regplot` can happily coexist in a figure
with other kinds of plots and will follow the global matplotlib color
cycle. In contrast, :func:`lmplot` needs to occupy an entire figure, and
the size and color cycle are controlled through function parameters,
ignoring the global defaults.\
"""),
x_estimator=dedent("""\
x_estimator : callable that maps vector -> scalar, optional
Apply this function to each unique value of ``x`` and plot the
resulting estimate. This is useful when ``x`` is a discrete variable.
If ``x_ci`` is not ``None``, this estimate will be bootstrapped and a
confidence interval will be drawn.\
"""),
x_bins=dedent("""\
x_bins : int or vector, optional
Bin the ``x`` variable into discrete bins and then estimate the central
tendency and a confidence interval. This binning only influences how
the scatterplot is drawn; the regression is still fit to the original
data. This parameter is interpreted either as the number of
        evenly-sized (but not necessarily evenly-spaced) bins or the positions of the bin
centers. When this parameter is used, it implies that the default of
``x_estimator`` is ``numpy.mean``.\
"""),
x_ci=dedent("""\
x_ci : "ci", int in [0, 100] or None, optional
Size of the confidence interval used when plotting a central tendency
        for discrete values of ``x``. If "ci", defer to the value of the ``ci``
parameter.\
"""),
scatter=dedent("""\
scatter : bool, optional
If ``True``, draw a scatterplot with the underlying observations (or
the ``x_estimator`` values).\
"""),
fit_reg=dedent("""\
fit_reg : bool, optional
If ``True``, estimate and plot a regression model relating the ``x``
and ``y`` variables.\
"""),
ci=dedent("""\
ci : int in [0, 100] or None, optional
Size of the confidence interval for the regression estimate. This will
be drawn using translucent bands around the regression line. The
confidence interval is estimated using a bootstrap; for large
datasets, it may be advisable to avoid that computation by setting
this parameter to None.\
"""),
n_boot=dedent("""\
n_boot : int, optional
Number of bootstrap resamples used to estimate the ``ci``. The default
value attempts to balance time and stability; you may want to increase
this value for "final" versions of plots.\
"""),
units=dedent("""\
units : variable name in ``data``, optional
If the ``x`` and ``y`` observations are nested within sampling units,
those can be specified here. This will be taken into account when
computing the confidence intervals by performing a multilevel bootstrap
that resamples both units and observations (within unit). This does not
otherwise influence how the regression is estimated or drawn.\
"""),
order=dedent("""\
order : int, optional
If ``order`` is greater than 1, use ``numpy.polyfit`` to estimate a
polynomial regression.\
"""),
logistic=dedent("""\
logistic : bool, optional
If ``True``, assume that ``y`` is a binary variable and use
``statsmodels`` to estimate a logistic regression model. Note that this
is substantially more computationally intensive than linear regression,
so you may wish to decrease the number of bootstrap resamples
(``n_boot``) or set ``ci`` to None.\
"""),
lowess=dedent("""\
lowess : bool, optional
If ``True``, use ``statsmodels`` to estimate a nonparametric lowess
model (locally weighted linear regression). Note that confidence
intervals cannot currently be drawn for this kind of model.\
"""),
robust=dedent("""\
robust : bool, optional
If ``True``, use ``statsmodels`` to estimate a robust regression. This
will de-weight outliers. Note that this is substantially more
computationally intensive than standard linear regression, so you may
wish to decrease the number of bootstrap resamples (``n_boot``) or set
``ci`` to None.\
"""),
logx=dedent("""\
logx : bool, optional
If ``True``, estimate a linear regression of the form y ~ log(x), but
plot the scatterplot and regression model in the input space. Note that
``x`` must be positive for this to work.\
"""),
xy_partial=dedent("""\
{x,y}_partial : strings in ``data`` or matrices
Confounding variables to regress out of the ``x`` or ``y`` variables
before plotting.\
"""),
truncate=dedent("""\
truncate : bool, optional
By default, the regression line is drawn to fill the x axis limits
after the scatterplot is drawn. If ``truncate`` is ``True``, it will
        instead be bounded by the data limits.\
"""),
xy_jitter=dedent("""\
{x,y}_jitter : floats, optional
Add uniform random noise of this size to either the ``x`` or ``y``
variables. The noise is added to a copy of the data after fitting the
regression, and only influences the look of the scatterplot. This can
be helpful when plotting variables that take discrete values.\
"""),
scatter_line_kws=dedent("""\
{scatter,line}_kws : dictionaries
Additional keyword arguments to pass to ``plt.scatter`` and
``plt.plot``.\
"""),
)
_regression_docs.update(_facet_docs)
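# Illustrative sketch (not part of the seaborn API) of the axes-level vs
# figure-level distinction described in ``regplot_vs_lmplot`` above: regplot
# draws onto a specific matplotlib Axes and can share a figure with other
# plots, while lmplot always builds and manages its own FacetGrid figure.
# The synthetic column names used here are made up.
def _example_axes_vs_figure_level():
    rs = np.random.RandomState(0)
    df = pd.DataFrame({"x": rs.normal(size=100)})
    df["y"] = 2 * df["x"] + rs.normal(size=100)
    f, (ax1, ax2) = plt.subplots(1, 2)
    regplot("x", "y", data=df, ax=ax1)   # coexists with the histogram
    ax2.hist(df["y"].values)
    g = lmplot("x", "y", data=df)        # creates its own figure
    return f, g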
def lmplot(x, y, data, hue=None, col=None, row=None, palette=None,
col_wrap=None, size=5, aspect=1, markers="o", sharex=True,
sharey=True, hue_order=None, col_order=None, row_order=None,
legend=True, legend_out=True, x_estimator=None, x_bins=None,
x_ci="ci", scatter=True, fit_reg=True, ci=95, n_boot=1000,
units=None, order=1, logistic=False, lowess=False, robust=False,
logx=False, x_partial=None, y_partial=None, truncate=False,
x_jitter=None, y_jitter=None, scatter_kws=None, line_kws=None):
# Reduce the dataframe to only needed columns
need_cols = [x, y, hue, col, row, units, x_partial, y_partial]
cols = np.unique([a for a in need_cols if a is not None]).tolist()
data = data[cols]
# Initialize the grid
facets = FacetGrid(data, row, col, hue, palette=palette,
row_order=row_order, col_order=col_order,
hue_order=hue_order, size=size, aspect=aspect,
col_wrap=col_wrap, sharex=sharex, sharey=sharey,
legend_out=legend_out)
# Add the markers here as FacetGrid has figured out how many levels of the
# hue variable are needed and we don't want to duplicate that process
if facets.hue_names is None:
n_markers = 1
else:
n_markers = len(facets.hue_names)
if not isinstance(markers, list):
markers = [markers] * n_markers
if len(markers) != n_markers:
raise ValueError(("markers must be a singeton or a list of markers "
"for each level of the hue variable"))
facets.hue_kws = {"marker": markers}
# Hack to set the x limits properly, which needs to happen here
# because the extent of the regression estimate is determined
# by the limits of the plot
if sharex:
for ax in facets.axes.flat:
            # Use a throwaway name so the ``scatter`` argument isn't clobbered
            points = ax.scatter(data[x], np.ones(len(data)) * data[y].mean())
            points.remove()
# Draw the regression plot on each facet
regplot_kws = dict(
x_estimator=x_estimator, x_bins=x_bins, x_ci=x_ci,
scatter=scatter, fit_reg=fit_reg, ci=ci, n_boot=n_boot, units=units,
order=order, logistic=logistic, lowess=lowess, robust=robust,
logx=logx, x_partial=x_partial, y_partial=y_partial, truncate=truncate,
x_jitter=x_jitter, y_jitter=y_jitter,
scatter_kws=scatter_kws, line_kws=line_kws,
)
facets.map_dataframe(regplot, x, y, **regplot_kws)
# Add a legend
if legend and (hue is not None) and (hue not in [col, row]):
facets.add_legend()
return facets
lmplot.__doc__ = dedent("""\
Plot data and regression model fits across a FacetGrid.
This function combines :func:`regplot` and :class:`FacetGrid`. It is
intended as a convenient interface to fit regression models across
conditional subsets of a dataset.
When thinking about how to assign variables to different facets, a general
rule is that it makes sense to use ``hue`` for the most important
comparison, followed by ``col`` and ``row``. However, always think about
your particular dataset and the goals of the visualization you are
creating.
{model_api}
The parameters to this function span most of the options in
:class:`FacetGrid`, although there may be occasional cases where you will
want to use that class and :func:`regplot` directly.
Parameters
----------
x, y : strings, optional
Input variables; these should be column names in ``data``.
{data}
hue, col, row : strings
Variables that define subsets of the data, which will be drawn on
separate facets in the grid. See the ``*_order`` parameters to control
the order of levels of this variable.
{palette}
{col_wrap}
{size}
{aspect}
markers : matplotlib marker code or list of marker codes, optional
Markers for the scatterplot. If a list, each marker in the list will be
used for each level of the ``hue`` variable.
{share_xy}
{{hue,col,row}}_order : lists, optional
Order for the levels of the faceting variables. By default, this will
be the order that the levels appear in ``data`` or, if the variables
are pandas categoricals, the category order.
legend : bool, optional
If ``True`` and there is a ``hue`` variable, add a legend.
{legend_out}
{x_estimator}
{x_bins}
{x_ci}
{scatter}
{fit_reg}
{ci}
{n_boot}
{units}
{order}
{logistic}
{lowess}
{robust}
{logx}
{xy_partial}
{truncate}
{xy_jitter}
{scatter_line_kws}
See Also
--------
regplot : Plot data and a conditional model fit.
FacetGrid : Subplot grid for plotting conditional relationships.
pairplot : Combine :func:`regplot` and :class:`PairGrid` (when used with
``kind="reg"``).
Notes
-----
{regplot_vs_lmplot}
Examples
--------
These examples focus on basic regression model plots to exhibit the
various faceting options; see the :func:`regplot` docs for demonstrations
of the other options for plotting the data and models. There are also
    other examples of how to manipulate the plot using the returned object
    in the :class:`FacetGrid` docs.
Plot a simple linear relationship between two variables:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set(color_codes=True)
>>> tips = sns.load_dataset("tips")
>>> g = sns.lmplot(x="total_bill", y="tip", data=tips)
Condition on a third variable and plot the levels in different colors:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", hue="smoker", data=tips)
    Use different markers as well as colors so the plot will reproduce well
    in black-and-white:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", hue="smoker", data=tips,
... markers=["o", "x"])
Use a different color palette:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", hue="smoker", data=tips,
... palette="Set1")
Map ``hue`` levels to colors with a dictionary:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", hue="smoker", data=tips,
... palette=dict(Yes="g", No="m"))
Plot the levels of the third variable across different columns:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", col="smoker", data=tips)
Change the size and aspect ratio of the facets:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="size", y="total_bill", hue="day", col="day",
... data=tips, aspect=.4, x_jitter=.1)
Wrap the levels of the column variable into multiple rows:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", col="day", hue="day",
... data=tips, col_wrap=2, size=3)
Condition on two variables to make a full grid:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", row="sex", col="time",
... data=tips, size=3)
Use methods on the returned :class:`FacetGrid` instance to further tweak
the plot:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", row="sex", col="time",
... data=tips, size=3)
>>> g = (g.set_axis_labels("Total bill (US Dollars)", "Tip")
... .set(xlim=(0, 60), ylim=(0, 12),
... xticks=[10, 30, 50], yticks=[2, 6, 10])
... .fig.subplots_adjust(wspace=.02))
""").format(**_regression_docs)
def regplot(x, y, data=None, x_estimator=None, x_bins=None, x_ci="ci",
scatter=True, fit_reg=True, ci=95, n_boot=1000, units=None,
order=1, logistic=False, lowess=False, robust=False,
logx=False, x_partial=None, y_partial=None,
truncate=False, dropna=True, x_jitter=None, y_jitter=None,
label=None, color=None, marker="o",
scatter_kws=None, line_kws=None, ax=None):
plotter = _RegressionPlotter(x, y, data, x_estimator, x_bins, x_ci,
scatter, fit_reg, ci, n_boot, units,
order, logistic, lowess, robust, logx,
x_partial, y_partial, truncate, dropna,
x_jitter, y_jitter, color, label)
if ax is None:
ax = plt.gca()
scatter_kws = {} if scatter_kws is None else copy.copy(scatter_kws)
scatter_kws["marker"] = marker
line_kws = {} if line_kws is None else copy.copy(line_kws)
plotter.plot(ax, scatter_kws, line_kws)
return ax
regplot.__doc__ = dedent("""\
Plot data and a linear regression model fit.
{model_api}
Parameters
----------
x, y: string, series, or vector array
Input variables. If strings, these should correspond with column names
in ``data``. When pandas objects are used, axes will be labeled with
the series name.
{data}
{x_estimator}
{x_bins}
{x_ci}
{scatter}
{fit_reg}
{ci}
{n_boot}
{units}
{order}
{logistic}
{lowess}
{robust}
{logx}
{xy_partial}
{truncate}
{xy_jitter}
label : string
        Label to apply to either the scatterplot or regression line (if
``scatter`` is ``False``) for use in a legend.
color : matplotlib color
Color to apply to all plot elements; will be superseded by colors
passed in ``scatter_kws`` or ``line_kws``.
marker : matplotlib marker code
Marker to use for the scatterplot glyphs.
{scatter_line_kws}
ax : matplotlib Axes, optional
Axes object to draw the plot onto, otherwise uses the current Axes.
Returns
-------
ax : matplotlib Axes
The Axes object containing the plot.
See Also
--------
lmplot : Combine :func:`regplot` and :class:`FacetGrid` to plot multiple
linear relationships in a dataset.
jointplot : Combine :func:`regplot` and :class:`JointGrid` (when used with
``kind="reg"``).
pairplot : Combine :func:`regplot` and :class:`PairGrid` (when used with
``kind="reg"``).
residplot : Plot the residuals of a linear regression model.
interactplot : Plot a two-way interaction between continuous variables
Notes
-----
{regplot_vs_lmplot}
    It's also easy to combine :func:`regplot` and :class:`JointGrid` or
:class:`PairGrid` through the :func:`jointplot` and :func:`pairplot`
functions, although these do not directly accept all of :func:`regplot`'s
parameters.
Examples
--------
Plot the relationship between two variables in a DataFrame:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set(color_codes=True)
>>> tips = sns.load_dataset("tips")
>>> ax = sns.regplot(x="total_bill", y="tip", data=tips)
Plot with two variables defined as numpy arrays; use a different color:
.. plot::
:context: close-figs
>>> import numpy as np; np.random.seed(8)
>>> mean, cov = [4, 6], [(1.5, .7), (.7, 1)]
>>> x, y = np.random.multivariate_normal(mean, cov, 80).T
>>> ax = sns.regplot(x=x, y=y, color="g")
Plot with two variables defined as pandas Series; use a different marker:
.. plot::
:context: close-figs
>>> import pandas as pd
>>> x, y = pd.Series(x, name="x_var"), pd.Series(y, name="y_var")
>>> ax = sns.regplot(x=x, y=y, marker="+")
Use a 68% confidence interval, which corresponds with the standard error
of the estimate:
.. plot::
:context: close-figs
>>> ax = sns.regplot(x=x, y=y, ci=68)
Plot with a discrete ``x`` variable and add some jitter:
.. plot::
:context: close-figs
>>> ax = sns.regplot(x="size", y="total_bill", data=tips, x_jitter=.1)
Plot with a discrete ``x`` variable showing means and confidence intervals
for unique values:
.. plot::
:context: close-figs
>>> ax = sns.regplot(x="size", y="total_bill", data=tips,
... x_estimator=np.mean)
Plot with a continuous variable divided into discrete bins:
.. plot::
:context: close-figs
>>> ax = sns.regplot(x=x, y=y, x_bins=4)
Fit a higher-order polynomial regression and truncate the model prediction:
.. plot::
:context: close-figs
>>> ans = sns.load_dataset("anscombe")
>>> ax = sns.regplot(x="x", y="y", data=ans.loc[ans.dataset == "II"],
... scatter_kws={{"s": 80}},
... order=2, ci=None, truncate=True)
Fit a robust regression and don't plot a confidence interval:
.. plot::
:context: close-figs
>>> ax = sns.regplot(x="x", y="y", data=ans.loc[ans.dataset == "III"],
... scatter_kws={{"s": 80}},
... robust=True, ci=None)
Fit a logistic regression; jitter the y variable and use fewer bootstrap
iterations:
.. plot::
:context: close-figs
>>> tips["big_tip"] = (tips.tip / tips.total_bill) > .175
>>> ax = sns.regplot(x="total_bill", y="big_tip", data=tips,
... logistic=True, n_boot=500, y_jitter=.03)
Fit the regression model using log(x) and truncate the model prediction:
.. plot::
:context: close-figs
>>> ax = sns.regplot(x="size", y="total_bill", data=tips,
... x_estimator=np.mean, logx=True, truncate=True)
""").format(**_regression_docs)
def residplot(x, y, data=None, lowess=False, x_partial=None, y_partial=None,
order=1, robust=False, dropna=True, label=None, color=None,
scatter_kws=None, line_kws=None, ax=None):
"""Plot the residuals of a linear regression.
This function will regress y on x (possibly as a robust or polynomial
regression) and then draw a scatterplot of the residuals. You can
optionally fit a lowess smoother to the residual plot, which can
help in determining if there is structure to the residuals.
Parameters
----------
x : vector or string
Data or column name in `data` for the predictor variable.
y : vector or string
Data or column name in `data` for the response variable.
data : DataFrame, optional
DataFrame to use if `x` and `y` are column names.
lowess : boolean, optional
Fit a lowess smoother to the residual scatterplot.
    {x, y}_partial : matrix or string(s), optional
Matrix with same first dimension as `x`, or column name(s) in `data`.
These variables are treated as confounding and are removed from
the `x` or `y` variables before plotting.
order : int, optional
Order of the polynomial to fit when calculating the residuals.
robust : boolean, optional
Fit a robust linear regression when calculating the residuals.
dropna : boolean, optional
If True, ignore observations with missing data when fitting and
plotting.
label : string, optional
Label that will be used in any plot legends.
color : matplotlib color, optional
Color to use for all elements of the plot.
{scatter, line}_kws : dictionaries, optional
Additional keyword arguments passed to scatter() and plot() for drawing
the components of the plot.
ax : matplotlib axis, optional
Plot into this axis, otherwise grab the current axis or make a new
one if not existing.
Returns
-------
ax: matplotlib axes
Axes with the regression plot.
See Also
--------
regplot : Plot a simple linear regression model.
jointplot (with kind="resid"): Draw a residplot with univariate
        marginal distributions.
"""
plotter = _RegressionPlotter(x, y, data, ci=None,
order=order, robust=robust,
x_partial=x_partial, y_partial=y_partial,
dropna=dropna, color=color, label=label)
if ax is None:
ax = plt.gca()
# Calculate the residual from a linear regression
_, yhat, _ = plotter.fit_regression(grid=plotter.x)
plotter.y = plotter.y - yhat
# Set the regression option on the plotter
if lowess:
plotter.lowess = True
else:
plotter.fit_reg = False
# Plot a horizontal line at 0
ax.axhline(0, ls=":", c=".2")
# Draw the scatterplot
scatter_kws = {} if scatter_kws is None else scatter_kws
line_kws = {} if line_kws is None else line_kws
plotter.plot(ax, scatter_kws, line_kws)
return ax
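# Illustrative usage sketch for ``residplot`` (not part of the seaborn API);
# the synthetic data are made up, and the lowess curve requires statsmodels.
# Residuals of a straight-line fit to quadratic data show clear structure.
def _example_residplot_usage():
    rs = np.random.RandomState(0)
    x = rs.uniform(-2, 2, 100)
    y = x ** 2 + rs.normal(size=100)
    ax = residplot(x, y, lowess=True)
    return ax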
def coefplot(formula, data, groupby=None, intercept=False, ci=95,
palette="husl"):
"""Plot the coefficients from a linear model.
Parameters
----------
formula : string
patsy formula for ols model
data : dataframe
data for the plot; formula terms must appear in columns
groupby : grouping object, optional
object to group data with to fit conditional models
intercept : bool, optional
if False, strips the intercept term before plotting
ci : float, optional
size of confidence intervals
palette : seaborn color palette, optional
        palette for the horizontal plots
"""
if not _has_statsmodels:
raise ImportError("The `coefplot` function requires statsmodels")
import statsmodels.formula.api as sf
alpha = 1 - ci / 100
if groupby is None:
coefs = sf.ols(formula, data).fit().params
cis = sf.ols(formula, data).fit().conf_int(alpha)
else:
grouped = data.groupby(groupby)
coefs = grouped.apply(lambda d: sf.ols(formula, d).fit().params).T
cis = grouped.apply(lambda d: sf.ols(formula, d).fit().conf_int(alpha))
# Possibly ignore the intercept
if not intercept:
coefs = coefs.ix[1:]
n_terms = len(coefs)
    # Plot separately depending on groupby
w, h = mpl.rcParams["figure.figsize"]
hsize = lambda n: n * (h / 2)
wsize = lambda n: n * (w / (4 * (n / 5)))
if groupby is None:
colors = itertools.cycle(color_palette(palette, n_terms))
f, ax = plt.subplots(1, 1, figsize=(wsize(n_terms), hsize(1)))
for i, term in enumerate(coefs.index):
color = next(colors)
low, high = cis.ix[term]
ax.plot([i, i], [low, high], c=color,
solid_capstyle="round", lw=2.5)
ax.plot(i, coefs.ix[term], "o", c=color, ms=8)
ax.set_xlim(-.5, n_terms - .5)
ax.axhline(0, ls="--", c="dimgray")
ax.set_xticks(range(n_terms))
ax.set_xticklabels(coefs.index)
else:
n_groups = len(coefs.columns)
f, axes = plt.subplots(n_terms, 1, sharex=True,
figsize=(wsize(n_groups), hsize(n_terms)))
if n_terms == 1:
axes = [axes]
colors = itertools.cycle(color_palette(palette, n_groups))
for ax, term in zip(axes, coefs.index):
for i, group in enumerate(coefs.columns):
color = next(colors)
low, high = cis.ix[(group, term)]
ax.plot([i, i], [low, high], c=color,
solid_capstyle="round", lw=2.5)
ax.plot(i, coefs.loc[term, group], "o", c=color, ms=8)
ax.set_xlim(-.5, n_groups - .5)
ax.axhline(0, ls="--", c="dimgray")
ax.set_title(term)
ax.set_xlabel(groupby)
ax.set_xticks(range(n_groups))
ax.set_xticklabels(coefs.columns)
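# Illustrative usage sketch for ``coefplot`` (not part of the seaborn API);
# requires statsmodels, and the column and group names here are made up.
def _example_coefplot_usage():
    rs = np.random.RandomState(0)
    df = pd.DataFrame({"x1": rs.normal(size=200),
                       "x2": rs.normal(size=200),
                       "group": np.repeat(["a", "b"], 100)})
    df["y"] = 2 * df["x1"] - df["x2"] + rs.normal(size=200)
    # One panel per term, one point (with CI) per group
    coefplot("y ~ x1 + x2", df, groupby="group")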
def interactplot(x1, x2, y, data=None, filled=False, cmap="RdBu_r",
colorbar=True, levels=30, logistic=False,
contour_kws=None, scatter_kws=None, ax=None, **kwargs):
"""Visualize a continuous two-way interaction with a contour plot.
Parameters
----------
    x1, x2, y : strings or array-like
Either the two independent variables and the dependent variable,
or keys to extract them from `data`
data : DataFrame
Pandas DataFrame with the data in the columns.
filled : bool
Whether to plot with filled or unfilled contours
cmap : matplotlib colormap
        Colormap to represent yhat in the contour plot.
colorbar : bool
Whether to draw the colorbar for interpreting the color values.
levels : int or sequence
Number or position of contour plot levels.
logistic : bool
Fit a logistic regression model instead of linear regression.
contour_kws : dictionary
Keyword arguments for contour[f]().
scatter_kws : dictionary
Keyword arguments for plot().
ax : matplotlib axis
Axis to draw plot in.
Returns
-------
ax : Matplotlib axis
Axis with the contour plot.
"""
if not _has_statsmodels:
raise ImportError("The `interactplot` function requires statsmodels")
from statsmodels.regression.linear_model import OLS
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod.families import Binomial
# Handle the form of the data
if data is not None:
x1 = data[x1]
x2 = data[x2]
y = data[y]
if hasattr(x1, "name"):
xlabel = x1.name
else:
xlabel = None
if hasattr(x2, "name"):
ylabel = x2.name
else:
ylabel = None
if hasattr(y, "name"):
clabel = y.name
else:
clabel = None
x1 = np.asarray(x1)
x2 = np.asarray(x2)
y = np.asarray(y)
# Initialize the scatter keyword dictionary
if scatter_kws is None:
scatter_kws = {}
if not ("color" in scatter_kws or "c" in scatter_kws):
scatter_kws["color"] = "#222222"
if "alpha" not in scatter_kws:
scatter_kws["alpha"] = 0.75
    # Initialize the contour keyword dictionary
if contour_kws is None:
contour_kws = {}
# Initialize the axis
if ax is None:
ax = plt.gca()
# Plot once to let matplotlib sort out the axis limits
ax.plot(x1, x2, "o", **scatter_kws)
# Find the plot limits
x1min, x1max = ax.get_xlim()
x2min, x2max = ax.get_ylim()
# Make the grid for the contour plot
x1_points = np.linspace(x1min, x1max, 100)
x2_points = np.linspace(x2min, x2max, 100)
xx1, xx2 = np.meshgrid(x1_points, x2_points)
# Fit the model with an interaction
X = np.c_[np.ones(x1.size), x1, x2, x1 * x2]
if logistic:
lm = GLM(y, X, family=Binomial()).fit()
else:
lm = OLS(y, X).fit()
# Evaluate the model on the grid
eval = np.vectorize(lambda x1_, x2_: lm.predict([1, x1_, x2_, x1_ * x2_]))
yhat = eval(xx1, xx2)
# Default color limits put the midpoint at mean(y)
y_bar = y.mean()
c_min = min(np.percentile(y, 2), yhat.min())
c_max = max(np.percentile(y, 98), yhat.max())
delta = max(c_max - y_bar, y_bar - c_min)
    c_min, c_max = y_bar - delta, y_bar + delta
contour_kws.setdefault("vmin", c_min)
contour_kws.setdefault("vmax", c_max)
# Draw the contour plot
func_name = "contourf" if filled else "contour"
contour = getattr(ax, func_name)
c = contour(xx1, xx2, yhat, levels, cmap=cmap, **contour_kws)
# Draw the scatter again so it's visible
ax.plot(x1, x2, "o", **scatter_kws)
# Draw a colorbar, maybe
if colorbar:
bar = plt.colorbar(c)
# Label the axes
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if clabel is not None and colorbar:
clabel = "P(%s)" % clabel if logistic else clabel
bar.set_label(clabel, labelpad=15, rotation=270)
return ax
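# Illustrative usage sketch for ``interactplot`` (not part of the seaborn
# API); requires statsmodels. The synthetic response has a known x1 * x2
# interaction, so the fitted contours should show a saddle-like surface.
def _example_interactplot_usage():
    rs = np.random.RandomState(0)
    x1, x2 = rs.normal(size=(2, 200))
    y = x1 + x2 + 2 * x1 * x2 + rs.normal(size=200)
    ax = interactplot(x1, x2, y, filled=True)
    return ax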
def corrplot(data, names=None, annot=True, sig_stars=True, sig_tail="both",
sig_corr=True, cmap=None, cmap_range=None, cbar=True,
diag_names=True, method=None, ax=None, **kwargs):
"""Plot a correlation matrix with colormap and r values.
NOTE: This function is deprecated in favor of :func:`heatmap` and will
be removed in a forthcoming release.
Parameters
----------
data : Dataframe or nobs x nvars array
        Rectangular input data with variables in the columns.
names : sequence of strings
Names to associate with variables if `data` is not a DataFrame.
annot : bool
Whether to annotate the upper triangle with correlation coefficients.
sig_stars : bool
If True, get significance with permutation test and denote with stars.
sig_tail : both | upper | lower
Direction for significance test. Also controls the default colorbar.
sig_corr : bool
If True, use FWE-corrected p values for the sig stars.
cmap : colormap
Colormap name as string or colormap object.
cmap_range : None, "full", (low, high)
Either truncate colormap at (-max(abs(r)), max(abs(r))), use the
full range (-1, 1), or specify (min, max) values for the colormap.
cbar : bool
If true, plot the colorbar legend.
method: None (pearson) | kendall | spearman
Correlation method to compute pairwise correlations. Methods other
than the default pearson correlation will not have a significance
computed.
ax : matplotlib axis
Axis to draw plot in.
kwargs : other keyword arguments
Passed to ax.matshow()
Returns
-------
ax : matplotlib axis
Axis object with plot.
"""
warnings.warn(("The `corrplot` function has been deprecated in favor "
"of `heatmap` and will be removed in a forthcoming "
"release. Please update your code."))
if not isinstance(data, pd.DataFrame):
if names is None:
names = ["var_%d" % i for i in range(data.shape[1])]
data = pd.DataFrame(data, columns=names, dtype=np.float)
# Calculate the correlation matrix of the dataframe
if method is None:
corrmat = data.corr()
else:
corrmat = data.corr(method=method)
# Pandas will drop non-numeric columns; let's keep track of that operation
names = corrmat.columns
data = data[names]
# Get p values with a permutation test
if annot and sig_stars and method is None:
p_mat = algo.randomize_corrmat(data.values.T, sig_tail, sig_corr)
else:
p_mat = None
# Sort out the color range
if cmap_range is None:
triu = np.triu_indices(len(corrmat), 1)
vmax = min(1, np.max(np.abs(corrmat.values[triu])) * 1.15)
vmin = -vmax
if sig_tail == "both":
cmap_range = vmin, vmax
elif sig_tail == "upper":
cmap_range = 0, vmax
elif sig_tail == "lower":
cmap_range = vmin, 0
elif cmap_range == "full":
cmap_range = (-1, 1)
# Find a colormapping, somewhat intelligently
if cmap is None:
if min(cmap_range) >= 0:
cmap = "OrRd"
elif max(cmap_range) <= 0:
cmap = "PuBu_r"
else:
cmap = "coolwarm"
if cmap == "jet":
# Paternalism
raise ValueError("Never use the 'jet' colormap!")
# Plot using the more general symmatplot function
ax = symmatplot(corrmat, p_mat, names, cmap, cmap_range,
cbar, annot, diag_names, ax, **kwargs)
return ax
def symmatplot(mat, p_mat=None, names=None, cmap="Greys", cmap_range=None,
cbar=True, annot=True, diag_names=True, ax=None, **kwargs):
"""Plot a symmetric matrix with colormap and statistic values.
NOTE: This function is deprecated in favor of :func:`heatmap` and will
be removed in a forthcoming release.
"""
warnings.warn(("The `symmatplot` function has been deprecated in favor "
"of `heatmap` and will be removed in a forthcoming "
"release. Please update your code."))
if ax is None:
ax = plt.gca()
nvars = len(mat)
if isinstance(mat, pd.DataFrame):
plotmat = mat.values.copy()
mat = mat.values
else:
plotmat = mat.copy()
plotmat[np.triu_indices(nvars)] = np.nan
if cmap_range is None:
vmax = np.nanmax(plotmat) * 1.15
vmin = np.nanmin(plotmat) * 1.15
elif len(cmap_range) == 2:
vmin, vmax = cmap_range
else:
raise ValueError("cmap_range argument not understood")
mat_img = ax.matshow(plotmat, cmap=cmap, vmin=vmin, vmax=vmax, **kwargs)
if cbar:
plt.colorbar(mat_img, shrink=.75)
if p_mat is None:
p_mat = np.ones((nvars, nvars))
if annot:
for i, j in zip(*np.triu_indices(nvars, 1)):
val = mat[i, j]
stars = utils.sig_stars(p_mat[i, j])
ax.text(j, i, "\n%.2g\n%s" % (val, stars),
fontdict=dict(ha="center", va="center"))
else:
fill = np.ones_like(plotmat)
fill[np.tril_indices_from(fill, -1)] = np.nan
ax.matshow(fill, cmap="Greys", vmin=0, vmax=0, zorder=2)
if names is None:
names = ["var%d" % i for i in range(nvars)]
if diag_names:
for i, name in enumerate(names):
ax.text(i, i, name, fontdict=dict(ha="center", va="center",
weight="bold", rotation=45))
ax.set_xticklabels(())
ax.set_yticklabels(())
else:
ax.xaxis.set_ticks_position("bottom")
xnames = names if annot else names[:-1]
ax.set_xticklabels(xnames, rotation=90)
ynames = names if annot else names[1:]
ax.set_yticklabels(ynames)
minor_ticks = np.linspace(-.5, nvars - 1.5, nvars)
ax.set_xticks(minor_ticks, True)
ax.set_yticks(minor_ticks, True)
major_ticks = np.linspace(0, nvars - 1, nvars)
xticks = major_ticks if annot else major_ticks[:-1]
ax.set_xticks(xticks)
yticks = major_ticks if annot else major_ticks[1:]
ax.set_yticks(yticks)
ax.grid(False, which="major")
ax.grid(True, which="minor", linestyle="-")
return ax
def pairplot(data, hue=None, hue_order=None, palette=None,
vars=None, x_vars=None, y_vars=None,
kind="scatter", diag_kind="hist", markers=None,
size=2.5, aspect=1, dropna=True,
plot_kws=None, diag_kws=None, grid_kws=None):
"""Plot pairwise relationships in a dataset.
By default, this function will create a grid of Axes such that each
    variable in ``data`` will be shared in the y-axis across a single row and
in the x-axis across a single column. The diagonal Axes are treated
differently, drawing a plot to show the univariate distribution of the data
for the variable in that column.
It is also possible to show a subset of variables or plot different
variables on the rows and columns.
This is a high-level interface for :class:`PairGrid` that is intended to
    make it easy to draw a few common styles. You should use :class:`PairGrid`
directly if you need more flexibility.
Parameters
----------
data : DataFrame
Tidy (long-form) dataframe where each column is a variable and
each row is an observation.
hue : string (variable name), optional
Variable in ``data`` to map plot aspects to different colors.
hue_order : list of strings
Order for the levels of the hue variable in the palette
palette : dict or seaborn color palette
Set of colors for mapping the ``hue`` variable. If a dict, keys
should be values in the ``hue`` variable.
vars : list of variable names, optional
Variables within ``data`` to use, otherwise use every column with
a numeric datatype.
{x, y}_vars : lists of variable names, optional
Variables within ``data`` to use separately for the rows and
columns of the figure; i.e. to make a non-square plot.
kind : {'scatter', 'reg'}, optional
Kind of plot for the non-identity relationships.
diag_kind : {'hist', 'kde'}, optional
Kind of plot for the diagonal subplots.
markers : single matplotlib marker code or list, optional
Either the marker to use for all datapoints or a list of markers with
a length the same as the number of levels in the hue variable so that
differently colored points will also have different scatterplot
markers.
size : scalar, optional
Height (in inches) of each facet.
aspect : scalar, optional
Aspect * size gives the width (in inches) of each facet.
dropna : boolean, optional
Drop missing values from the data before plotting.
{plot, diag, grid}_kws : dicts, optional
Dictionaries of keyword arguments.
Returns
-------
grid : PairGrid
Returns the underlying ``PairGrid`` instance for further tweaking.
See Also
--------
PairGrid : Subplot grid for more flexible plotting of pairwise
relationships.
Examples
--------
Draw scatterplots for joint relationships and histograms for univariate
distributions:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set(style="ticks", color_codes=True)
>>> iris = sns.load_dataset("iris")
>>> g = sns.pairplot(iris)
Show different levels of a categorical variable by the color of plot
elements:
.. plot::
:context: close-figs
>>> g = sns.pairplot(iris, hue="species")
Use a different color palette:
.. plot::
:context: close-figs
>>> g = sns.pairplot(iris, hue="species", palette="husl")
Use different markers for each level of the hue variable:
.. plot::
:context: close-figs
>>> g = sns.pairplot(iris, hue="species", markers=["o", "s", "D"])
Plot a subset of variables:
.. plot::
:context: close-figs
>>> g = sns.pairplot(iris, vars=["sepal_width", "sepal_length"])
Draw larger plots:
.. plot::
:context: close-figs
>>> g = sns.pairplot(iris, size=3,
... vars=["sepal_width", "sepal_length"])
Plot different variables in the rows and columns:
.. plot::
:context: close-figs
>>> g = sns.pairplot(iris,
... x_vars=["sepal_width", "sepal_length"],
... y_vars=["petal_width", "petal_length"])
Use kernel density estimates for univariate plots:
.. plot::
:context: close-figs
>>> g = sns.pairplot(iris, diag_kind="kde")
Fit linear regression models to the scatter plots:
.. plot::
:context: close-figs
>>> g = sns.pairplot(iris, kind="reg")
Pass keyword arguments down to the underlying functions (it may be easier
to use :class:`PairGrid` directly):
.. plot::
:context: close-figs
>>> g = sns.pairplot(iris, diag_kind="kde", markers="+",
... plot_kws=dict(s=50, edgecolor="b", linewidth=1),
... diag_kws=dict(shade=True))
"""
if plot_kws is None:
plot_kws = {}
if diag_kws is None:
diag_kws = {}
if grid_kws is None:
grid_kws = {}
# Set up the PairGrid
diag_sharey = diag_kind == "hist"
grid = PairGrid(data, vars=vars, x_vars=x_vars, y_vars=y_vars, hue=hue,
hue_order=hue_order, palette=palette,
diag_sharey=diag_sharey,
size=size, aspect=aspect, dropna=dropna, **grid_kws)
# Add the markers here as PairGrid has figured out how many levels of the
# hue variable are needed and we don't want to duplicate that process
if markers is not None:
if grid.hue_names is None:
n_markers = 1
else:
n_markers = len(grid.hue_names)
if not isinstance(markers, list):
markers = [markers] * n_markers
if len(markers) != n_markers:
raise ValueError(("markers must be a singeton or a list of markers"
" for each level of the hue variable"))
grid.hue_kws = {"marker": markers}
# Maybe plot on the diagonal
if grid.square_grid:
if diag_kind == "hist":
grid.map_diag(plt.hist, **diag_kws)
elif diag_kind == "kde":
diag_kws["legend"] = False
grid.map_diag(kdeplot, **diag_kws)
# Maybe plot on the off-diagonals
if grid.square_grid and diag_kind is not None:
plotter = grid.map_offdiag
else:
plotter = grid.map
if kind == "scatter":
plot_kws.setdefault("edgecolor", "white")
plotter(plt.scatter, **plot_kws)
elif kind == "reg":
plotter(regplot, **plot_kws)
# Add a legend
if hue is not None:
grid.add_legend()
return grid
| bsd-3-clause |
pombo-lab/gamtools | lib/gamtools/permutation.py | 1 | 5968 | """
======================
The permutation module
======================
The permutation module contains functions for randomly permuting GAM
:ref:`segregation tables <segregation_table>`, which can be a useful
way of generating random backgrounds for comparison with real datasets.
"""
import numpy as np
import pandas as pd
from . import segregation
def permute_by_offset(sample_segregation, offset):
"""Circularly permute a single column of a segregation table.
This function takes one single column from a
:ref:`segregation_table` (i.e. one sample, or one NP) and circularly
permutes it by "offset" bins.
:param sample_segregation: Input column of a segregation table to permute.
:param int offset: Number of bins to permute by.
    :returns: The circularly permuted column values as a :class:`numpy.ndarray`
"""
offset = offset % len(sample_segregation)
# Moving each value in an array of length L right by x bins is the same
# as splitting the data at bin (L - x) and swapping the two halves
corrected_offset = len(sample_segregation) - offset
new_start = sample_segregation.iloc[corrected_offset:]
new_end = sample_segregation.iloc[:corrected_offset]
new_col = pd.concat([new_start, new_end]).values
return new_col
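def _example_permute_by_offset():
    # Toy illustration (not part of the gamtools API) reproducing the
    # circular shift described in the docstring above.
    col = pd.Series([0, 0, 0, 1, 1, 1])
    shifted = permute_by_offset(col, 1)
    # shifted is array([1, 0, 0, 0, 1, 1]): the last detection wraps around
    # to the front while the run structure is preserved.
    return shifted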
def permute_by_chromosome(sample_segregation, offset):
"""Separately permute each chromosome from a single column of a segregation table.
This function takes one single column from a
:ref:`segregation_table` (i.e. one sample, or one NP) and circularly
permutes each chromosome separately by "offset" bins.
:param sample_segregation: Input column of a segregation table to permute.
:param int offset: Number of bins to permute by.
    :returns: The permuted column (each chromosome rotated separately) as a
        :class:`numpy.ndarray`
"""
permuted_chromosomes = []
chrom_index = sample_segregation.index.get_level_values(0)
for chrom in chrom_index.unique():
original_chromosome = sample_segregation[chrom_index == chrom]
permuted_chromosome = permute_by_offset(original_chromosome, offset)
permuted_chromosomes.append(permuted_chromosome)
permuted_segregation = np.concatenate(permuted_chromosomes)
return permuted_segregation
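def _example_permute_by_chromosome():
    # Toy illustration (not part of the gamtools API): each chromosome is
    # rotated independently, so detections never cross a chromosome boundary.
    # The index labels used here are made up.
    index = pd.MultiIndex.from_tuples(
        [('chr1', 0), ('chr1', 1), ('chr1', 2),
         ('chr2', 0), ('chr2', 1), ('chr2', 2)],
        names=['chrom', 'start'])
    col = pd.Series([1, 0, 0, 0, 0, 1], index=index)
    shifted = permute_by_chromosome(col, 1)
    # shifted is array([0, 1, 0, 1, 0, 0]): each 3-bin chromosome is rotated
    # right by one bin on its own.
    return shifted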
def permute_segregation(input_segregation):
"""Circularly permute each column of a segregation table.
This function takes a table of GAM segregation data (see
:ref:`segregation_table`) and circularly permutes each column by a random
amount. For example, if the column contains [ 0, 0, 0, 1, 1, 1 ] then a
circular permutation by one unit would give: [ 1, 0, 0, 0, 1, 1 ]. This can
be useful because it preserves the scaling features of a GAM matrix (i.e.
windows which lie next to each other are still more likely to co-segregate
in the same tube) but it randomizes long-range interactions.
Another feature of "real" GAM data is that it contains unmappable regions
with no signal. In order to preserve this feature, genomic regions which
are never detected in the input :ref:`segregation_table` are not subjected
to permutation.
:param input_segregation: Input segregation table to permute.
:type input_segregation: :ref:`segregation_table`
:returns: Returns a newly randomized :ref:`segregation_table`
"""
# Only permute positions that were mapped at least once
# This means we preserve the locations of centromeres etc,
# but requires that the input data has a large number of
# columns
mappable = input_segregation.sum(axis=1).astype(bool)
no_windows, no_samples = input_segregation[mappable].shape
# Make a copy of the original data
permutation = input_segregation.copy()
# Loop over columns
for i in range(no_samples):
# Choose a random position to break the segregation in two,
# Swap the two chunks around and write them to the copied df
offset = np.random.randint(no_windows)
new_col = permute_by_chromosome(input_segregation.iloc[mappable.values, i], offset)
permutation.iloc[mappable.values, i] = new_col
return permutation
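def _example_permute_segregation():
    # Minimal sketch (not part of the gamtools API): unmappable windows
    # (all-zero rows) stay fixed while mapped windows are rotated, so each
    # sample's detection frequency is unchanged by the permutation.
    index = pd.MultiIndex.from_tuples(
        [('chr1', i) for i in range(4)], names=['chrom', 'start'])
    table = pd.DataFrame({'NP1': [1, 0, 1, 0],
                          'NP2': [0, 0, 1, 0]}, index=index)
    permuted = permute_segregation(table)
    assert (permuted.sum() == table.sum()).all()
    return permuted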
def permute_segregation_autosomal(input_segregation, autosomes=None):
"""Circularly permute each autosomal chromosome in a segregation table
This function takes a table of GAM segregation data (see
:ref:`segregation_table`) and circularly permutes each autosomal chromosome
by a random amount. For each GAM sample, The last n values covering each
chromosome are moved from the end of the chromosome to the beginning, where
n is a random integer between 0 and the length of the chromosome.
Separately permuting each chromosome preserves their individual detection
frequencies, and avoids permuting genomic regions with very different
detection frequencies (e.g. mitochondrial DNA and sex chromosomes) into
one another.
    :param input_segregation: Input segregation table to permute.
    :type input_segregation: :ref:`segregation_table`
    :param list autosomes: Chromosome names to treat as autosomes and permute;
        defaults to 'chr1' through 'chr19' if not given.
    :returns: Returns a newly randomized :ref:`segregation_table`
"""
    # Sex chromosomes, unjoined contigs and mtDNA behave weirdly,
    # so don't permute them into the autosomal regions.
    if autosomes is None:
        autosomes = ['chr{0}'.format(c) for c in range(1, 20)]
is_autosomal = input_segregation.index.get_level_values(0).isin(autosomes)
segregation_to_permute = input_segregation.loc[is_autosomal]
permuted_segregation = permute_segregation(segregation_to_permute)
return permuted_segregation
def permute_segregation_from_args(args):
"""Extract parameters from an argparse namespace object and pass them to
permute_segregation_autosomal.
"""
input_segregation = segregation.open_segregation(args.segregation_file)
permuted_segregation = permute_segregation_autosomal(input_segregation)
permuted_segregation.to_csv(
args.output_file,
sep='\t',
header=True,
index=True)
| apache-2.0 |
jniediek/mne-python | examples/decoding/plot_decoding_spatio_temporal_source.py | 3 | 5973 | """
==========================
Decoding source space data
==========================
Decoding, a.k.a MVPA or supervised machine learning applied to MEG
data in source space on the left cortical surface. Here f-test feature
selection is employed to confine the classification to the potentially
relevant features. The classifier then is trained to selected features of
epochs in source space.
"""
# Author: Denis A. Engemann <[email protected]>
# Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import mne
import os
import numpy as np
from mne import io
from mne.datasets import sample
from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
print(__doc__)
data_path = sample.data_path()
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-oct-6-fwd.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
subjects_dir = data_path + '/subjects'
subject = os.environ['SUBJECT'] = subjects_dir + '/sample'
os.environ['SUBJECTS_DIR'] = subjects_dir
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
fname_cov = data_path + '/MEG/sample/sample_audvis-cov.fif'
label_names = 'Aud-rh', 'Vis-rh'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
tmin, tmax = -0.2, 0.5
event_id = dict(aud_r=2, vis_r=4) # load contra-lateral conditions
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(2, None, method='iir') # replace baselining with high-pass
events = mne.read_events(event_fname)
# Set up pick list: MEG - bad channels (modify to your needs)
raw.info['bads'] += ['MEG 2443'] # mark bads
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=None, preload=True,
reject=dict(grad=4000e-13, eog=150e-6),
decim=5) # decimate to save memory and increase speed
epochs.equalize_event_counts(list(event_id.keys()), 'mintime', copy=False)
epochs_list = [epochs[k] for k in event_id]
# Compute inverse solution
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
n_times = len(epochs.times)
n_vertices = 3732
n_epochs = len(epochs.events)
# Load data and compute inverse solution and stcs for each epoch.
noise_cov = mne.read_cov(fname_cov)
inverse_operator = read_inverse_operator(fname_inv)
X = np.zeros([n_epochs, n_vertices, n_times])
# to save memory, we'll load and transform our epochs step by step.
for condition_count, ep in zip([0, n_epochs // 2], epochs_list):
stcs = apply_inverse_epochs(ep, inverse_operator, lambda2,
method, pick_ori="normal", # saves us memory
return_generator=True)
for jj, stc in enumerate(stcs):
X[condition_count + jj] = stc.lh_data
###############################################################################
# Decoding in sensor space using a linear SVM
# Make arrays X and y such that :
# X is 3d with X.shape[0] is the total number of epochs to classify
# y is filled with integers coding for the class to predict
# We must have X.shape[0] equal to y.shape[0]
# we know the first half belongs to the first class, the second one
y = np.repeat([0, 1], len(X) // 2)  # belongs to the second class
X = X.reshape(n_epochs, n_vertices * n_times)
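# Quick sanity check (not in the original example): after equalizing event
# counts above, n_epochs is even, so the flattened design matrix and the
# label vector line up one-to-one before they go to scikit-learn.
assert X.shape[0] == y.shape[0] == n_epochs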
# we have to normalize the data before supplying them to our classifier
X -= X.mean(axis=0)
X /= X.std(axis=0)
# prepare classifier
from sklearn.svm import SVC # noqa
from sklearn.cross_validation import ShuffleSplit # noqa
# Define a monte-carlo cross-validation generator (reduce variance):
n_splits = 10
clf = SVC(C=1, kernel='linear')
cv = ShuffleSplit(len(X), n_splits, test_size=0.2)
# setup feature selection and classification pipeline
from sklearn.feature_selection import SelectKBest, f_classif # noqa
from sklearn.pipeline import Pipeline # noqa
# we will use an ANOVA f-test to preselect relevant spatio-temporal units
feature_selection = SelectKBest(f_classif, k=500) # take the best 500
# to make life easier we will create a pipeline object
anova_svc = Pipeline([('anova', feature_selection), ('svc', clf)])
# initialize score and feature weights result arrays
scores = np.zeros(n_splits)
feature_weights = np.zeros([n_vertices, n_times])
# hold on, this may take a moment
for ii, (train, test) in enumerate(cv):
anova_svc.fit(X[train], y[train])
y_pred = anova_svc.predict(X[test])
y_test = y[test]
scores[ii] = np.sum(y_pred == y_test) / float(len(y_test))
feature_weights += feature_selection.inverse_transform(clf.coef_) \
.reshape(n_vertices, n_times)
print('Average prediction accuracy: %0.3f | standard deviation: %0.3f'
% (scores.mean(), scores.std()))
# prepare feature weights for visualization
feature_weights /= (ii + 1) # create average weights
# create mask to avoid division error
feature_weights = np.ma.masked_array(feature_weights, feature_weights == 0)
# normalize scores for visualization purposes
feature_weights /= feature_weights.std(axis=1)[:, None]
feature_weights -= feature_weights.mean(axis=1)[:, None]
# unmask, take absolute values, emulate f-value scale
feature_weights = np.abs(feature_weights.data) * 10
vertices = [stc.lh_vertno, np.array([], int)] # empty array for right hemi
stc_feat = mne.SourceEstimate(feature_weights, vertices=vertices,
tmin=stc.tmin, tstep=stc.tstep,
subject='sample')
brain = stc_feat.plot(hemi='split', views=['lat', 'med'], transparent=True,
initial_time=0.1, time_unit='s')
| bsd-3-clause |
amolkahat/pandas | pandas/tests/indexing/test_categorical.py | 5 | 26870 | # -*- coding: utf-8 -*-
import pytest
import pandas as pd
import pandas.compat as compat
import numpy as np
from pandas import (Series, DataFrame, Timestamp, Categorical,
CategoricalIndex, Interval, Index)
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas.util import testing as tm
from pandas.core.dtypes.common import is_categorical_dtype
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.dtypes.dtypes import CategoricalDtype
class TestCategoricalIndex(object):
def setup_method(self, method):
self.df = DataFrame({'A': np.arange(6, dtype='int64'),
'B': Series(list('aabbca')).astype(
CDT(list('cab')))}).set_index('B')
self.df2 = DataFrame({'A': np.arange(6, dtype='int64'),
'B': Series(list('aabbca')).astype(
CDT(list('cabe')))}).set_index('B')
self.df3 = DataFrame({'A': np.arange(6, dtype='int64'),
'B': (Series([1, 1, 2, 1, 3, 2])
.astype(CDT([3, 2, 1], ordered=True)))
}).set_index('B')
self.df4 = DataFrame({'A': np.arange(6, dtype='int64'),
'B': (Series([1, 1, 2, 1, 3, 2])
.astype(CDT([3, 2, 1], ordered=False)))
}).set_index('B')
def test_loc_scalar(self):
result = self.df.loc['a']
expected = (DataFrame({'A': [0, 1, 5],
'B': (Series(list('aaa'))
.astype(CDT(list('cab'))))})
.set_index('B'))
assert_frame_equal(result, expected)
df = self.df.copy()
df.loc['a'] = 20
expected = (DataFrame({'A': [20, 20, 2, 3, 4, 20],
'B': (Series(list('aabbca'))
.astype(CDT(list('cab'))))})
.set_index('B'))
assert_frame_equal(df, expected)
# value not in the categories
pytest.raises(KeyError, lambda: df.loc['d'])
def f():
df.loc['d'] = 10
pytest.raises(TypeError, f)
def f():
df.loc['d', 'A'] = 10
pytest.raises(TypeError, f)
def f():
df.loc['d', 'C'] = 10
pytest.raises(TypeError, f)
def test_getitem_scalar(self):
cats = Categorical([Timestamp('12-31-1999'),
Timestamp('12-31-2000')])
s = Series([1, 2], index=cats)
expected = s.iloc[0]
result = s[cats[0]]
assert result == expected
def test_slicing_directly(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
assert sliced == "d"
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
tm.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_slicing(self):
cat = Series(Categorical([1, 2, 3, 4]))
reversed = cat[::-1]
exp = np.array([4, 3, 2, 1], dtype=np.int64)
tm.assert_numpy_array_equal(reversed.__array__(), exp)
df = DataFrame({'value': (np.arange(100) + 1).astype('int64')})
df['D'] = pd.cut(df.value, bins=[0, 25, 50, 75, 100])
expected = Series([11, Interval(0, 25)], index=['value', 'D'], name=10)
result = df.iloc[10]
tm.assert_series_equal(result, expected)
expected = DataFrame({'value': np.arange(11, 21).astype('int64')},
index=np.arange(10, 20).astype('int64'))
expected['D'] = pd.cut(expected.value, bins=[0, 25, 50, 75, 100])
result = df.iloc[10:20]
tm.assert_frame_equal(result, expected)
expected = Series([9, Interval(0, 25)], index=['value', 'D'], name=8)
result = df.loc[8]
tm.assert_series_equal(result, expected)
def test_slicing_and_getting_ops(self):
# systematically test the slicing operations:
# for all slicing ops:
# - returning a dataframe
# - returning a column
# - returning a row
# - returning a single value
cats = Categorical(
["a", "c", "b", "c", "c", "c", "c"], categories=["a", "b", "c"])
idx = Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 2, 3, 4, 5, 6, 7]
df = DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
cats2 = Categorical(["b", "c"], categories=["a", "b", "c"])
idx2 = Index(["j", "k"])
values2 = [3, 4]
# 2:4,: | "j":"k",:
exp_df = DataFrame({"cats": cats2, "values": values2}, index=idx2)
# :,"cats" | :,0
exp_col = Series(cats, index=idx, name='cats')
# "j",: | 2,:
exp_row = Series(["b", 3], index=["cats", "values"], dtype="object",
name="j")
# "j","cats | 2,0
exp_val = "b"
# iloc
# frame
res_df = df.iloc[2:4, :]
tm.assert_frame_equal(res_df, exp_df)
assert is_categorical_dtype(res_df["cats"])
# row
res_row = df.iloc[2, :]
tm.assert_series_equal(res_row, exp_row)
assert isinstance(res_row["cats"], compat.string_types)
# col
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
assert is_categorical_dtype(res_col)
# single value
res_val = df.iloc[2, 0]
assert res_val == exp_val
# loc
# frame
res_df = df.loc["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
assert is_categorical_dtype(res_df["cats"])
# row
res_row = df.loc["j", :]
tm.assert_series_equal(res_row, exp_row)
assert isinstance(res_row["cats"], compat.string_types)
# col
res_col = df.loc[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
assert is_categorical_dtype(res_col)
# single value
res_val = df.loc["j", "cats"]
assert res_val == exp_val
# ix
# frame
# res_df = df.loc["j":"k",[0,1]] # doesn't work?
res_df = df.loc["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
assert is_categorical_dtype(res_df["cats"])
# row
res_row = df.loc["j", :]
tm.assert_series_equal(res_row, exp_row)
assert isinstance(res_row["cats"], compat.string_types)
# col
res_col = df.loc[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
assert is_categorical_dtype(res_col)
# single value
res_val = df.loc["j", df.columns[0]]
assert res_val == exp_val
# iat
res_val = df.iat[2, 0]
assert res_val == exp_val
# at
res_val = df.at["j", "cats"]
assert res_val == exp_val
# fancy indexing
exp_fancy = df.iloc[[2]]
res_fancy = df[df["cats"] == "b"]
tm.assert_frame_equal(res_fancy, exp_fancy)
res_fancy = df[df["values"] == 3]
tm.assert_frame_equal(res_fancy, exp_fancy)
# get_value
res_val = df.at["j", "cats"]
assert res_val == exp_val
# i : int, slice, or sequence of integers
res_row = df.iloc[2]
tm.assert_series_equal(res_row, exp_row)
assert isinstance(res_row["cats"], compat.string_types)
res_df = df.iloc[slice(2, 4)]
tm.assert_frame_equal(res_df, exp_df)
assert is_categorical_dtype(res_df["cats"])
res_df = df.iloc[[2, 3]]
tm.assert_frame_equal(res_df, exp_df)
assert is_categorical_dtype(res_df["cats"])
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
assert is_categorical_dtype(res_col)
res_df = df.iloc[:, slice(0, 2)]
tm.assert_frame_equal(res_df, df)
assert is_categorical_dtype(res_df["cats"])
res_df = df.iloc[:, [0, 1]]
tm.assert_frame_equal(res_df, df)
assert is_categorical_dtype(res_df["cats"])
def test_slicing_doc_examples(self):
# GH 7918
cats = Categorical(["a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c"])
idx = Index(["h", "i", "j", "k", "l", "m", "n", ])
values = [1, 2, 2, 2, 3, 4, 5]
df = DataFrame({"cats": cats, "values": values}, index=idx)
result = df.iloc[2:4, :]
expected = DataFrame(
{"cats": Categorical(['b', 'b'], categories=['a', 'b', 'c']),
"values": [2, 2]}, index=['j', 'k'])
tm.assert_frame_equal(result, expected)
result = df.iloc[2:4, :].dtypes
expected = Series(['category', 'int64'], ['cats', 'values'])
tm.assert_series_equal(result, expected)
result = df.loc["h":"j", "cats"]
expected = Series(Categorical(['a', 'b', 'b'],
categories=['a', 'b', 'c']),
index=['h', 'i', 'j'], name='cats')
tm.assert_series_equal(result, expected)
result = df.loc["h":"j", df.columns[0:1]]
expected = DataFrame({'cats': Categorical(['a', 'b', 'b'],
categories=['a', 'b', 'c'])},
index=['h', 'i', 'j'])
tm.assert_frame_equal(result, expected)
def test_getitem_category_type(self):
# GH 14580
# test iloc() on Series with Categorical data
s = Series([1, 2, 3]).astype('category')
# get slice
result = s.iloc[0:2]
expected = Series([1, 2]).astype(CategoricalDtype([1, 2, 3]))
tm.assert_series_equal(result, expected)
# get list of indexes
result = s.iloc[[0, 1]]
expected = Series([1, 2]).astype(CategoricalDtype([1, 2, 3]))
tm.assert_series_equal(result, expected)
# get boolean array
result = s.iloc[[True, False, False]]
expected = Series([1]).astype(CategoricalDtype([1, 2, 3]))
tm.assert_series_equal(result, expected)
def test_loc_listlike(self):
# list of labels
result = self.df.loc[['c', 'a']]
expected = self.df.iloc[[4, 0, 1, 5]]
assert_frame_equal(result, expected, check_index_type=True)
result = self.df2.loc[['a', 'b', 'e']]
exp_index = CategoricalIndex(
list('aaabbe'), categories=list('cabe'), name='B')
expected = DataFrame({'A': [0, 1, 5, 2, 3, np.nan]}, index=exp_index)
assert_frame_equal(result, expected, check_index_type=True)
# element in the categories but not in the values
pytest.raises(KeyError, lambda: self.df2.loc['e'])
# assign is ok
df = self.df2.copy()
df.loc['e'] = 20
result = df.loc[['a', 'b', 'e']]
exp_index = CategoricalIndex(
list('aaabbe'), categories=list('cabe'), name='B')
expected = DataFrame({'A': [0, 1, 5, 2, 3, 20]}, index=exp_index)
assert_frame_equal(result, expected)
df = self.df2.copy()
result = df.loc[['a', 'b', 'e']]
exp_index = CategoricalIndex(
list('aaabbe'), categories=list('cabe'), name='B')
expected = DataFrame({'A': [0, 1, 5, 2, 3, np.nan]}, index=exp_index)
assert_frame_equal(result, expected, check_index_type=True)
# not all labels in the categories
with pytest.raises(KeyError):
self.df2.loc[['a', 'd']]
def test_loc_listlike_dtypes(self):
# GH 11586
# unique categories and codes
index = CategoricalIndex(['a', 'b', 'c'])
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}, index=index)
# unique slice
res = df.loc[['a', 'b']]
exp_index = CategoricalIndex(['a', 'b'],
categories=index.categories)
exp = DataFrame({'A': [1, 2], 'B': [4, 5]}, index=exp_index)
tm.assert_frame_equal(res, exp, check_index_type=True)
# duplicated slice
res = df.loc[['a', 'a', 'b']]
exp_index = CategoricalIndex(['a', 'a', 'b'],
categories=index.categories)
exp = DataFrame({'A': [1, 1, 2], 'B': [4, 4, 5]}, index=exp_index)
tm.assert_frame_equal(res, exp, check_index_type=True)
with tm.assert_raises_regex(
KeyError,
'a list-indexer must only include values that are '
'in the categories'):
df.loc[['a', 'x']]
# duplicated categories and codes
index = CategoricalIndex(['a', 'b', 'a'])
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}, index=index)
# unique slice
res = df.loc[['a', 'b']]
exp = DataFrame({'A': [1, 3, 2],
'B': [4, 6, 5]},
index=CategoricalIndex(['a', 'a', 'b']))
tm.assert_frame_equal(res, exp, check_index_type=True)
# duplicated slice
res = df.loc[['a', 'a', 'b']]
exp = DataFrame(
{'A': [1, 3, 1, 3, 2],
'B': [4, 6, 4, 6, 5
]}, index=CategoricalIndex(['a', 'a', 'a', 'a', 'b']))
tm.assert_frame_equal(res, exp, check_index_type=True)
with tm.assert_raises_regex(
KeyError,
'a list-indexer must only include values '
'that are in the categories'):
df.loc[['a', 'x']]
# contains unused category
index = CategoricalIndex(
['a', 'b', 'a', 'c'], categories=list('abcde'))
df = DataFrame({'A': [1, 2, 3, 4], 'B': [5, 6, 7, 8]}, index=index)
res = df.loc[['a', 'b']]
exp = DataFrame({'A': [1, 3, 2], 'B': [5, 7, 6]},
index=CategoricalIndex(['a', 'a', 'b'],
categories=list('abcde')))
tm.assert_frame_equal(res, exp, check_index_type=True)
res = df.loc[['a', 'e']]
exp = DataFrame({'A': [1, 3, np.nan], 'B': [5, 7, np.nan]},
index=CategoricalIndex(['a', 'a', 'e'],
categories=list('abcde')))
tm.assert_frame_equal(res, exp, check_index_type=True)
# duplicated slice
res = df.loc[['a', 'a', 'b']]
exp = DataFrame({'A': [1, 3, 1, 3, 2], 'B': [5, 7, 5, 7, 6]},
index=CategoricalIndex(['a', 'a', 'a', 'a', 'b'],
categories=list('abcde')))
tm.assert_frame_equal(res, exp, check_index_type=True)
with tm.assert_raises_regex(
KeyError,
'a list-indexer must only include values '
'that are in the categories'):
df.loc[['a', 'x']]
def test_get_indexer_array(self):
arr = np.array([Timestamp('1999-12-31 00:00:00'),
Timestamp('2000-12-31 00:00:00')], dtype=object)
cats = [Timestamp('1999-12-31 00:00:00'),
Timestamp('2000-12-31 00:00:00')]
ci = CategoricalIndex(cats,
categories=cats,
ordered=False, dtype='category')
result = ci.get_indexer(arr)
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(result, expected)
def test_get_indexer_same_categories_same_order(self):
ci = CategoricalIndex(['a', 'b'], categories=['a', 'b'])
result = ci.get_indexer(CategoricalIndex(['b', 'b'],
categories=['a', 'b']))
expected = np.array([1, 1], dtype='intp')
tm.assert_numpy_array_equal(result, expected)
def test_get_indexer_same_categories_different_order(self):
# https://github.com/pandas-dev/pandas/issues/19551
ci = CategoricalIndex(['a', 'b'], categories=['a', 'b'])
result = ci.get_indexer(CategoricalIndex(['b', 'b'],
categories=['b', 'a']))
expected = np.array([1, 1], dtype='intp')
tm.assert_numpy_array_equal(result, expected)
def test_getitem_with_listlike(self):
# GH 16115
cats = Categorical([Timestamp('12-31-1999'),
Timestamp('12-31-2000')])
expected = DataFrame([[1, 0], [0, 1]], dtype='uint8',
index=[0, 1], columns=cats)
dummies = pd.get_dummies(cats)
result = dummies[[c for c in dummies.columns]]
assert_frame_equal(result, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
tm.assert_numpy_array_equal(result, np.array([5], dtype='int8'))
def test_ix_categorical_index(self):
# GH 12531
df = DataFrame(np.random.randn(3, 3),
index=list('ABC'), columns=list('XYZ'))
cdf = df.copy()
cdf.index = CategoricalIndex(df.index)
cdf.columns = CategoricalIndex(df.columns)
expect = Series(df.loc['A', :], index=cdf.columns, name='A')
assert_series_equal(cdf.loc['A', :], expect)
expect = Series(df.loc[:, 'X'], index=cdf.index, name='X')
assert_series_equal(cdf.loc[:, 'X'], expect)
exp_index = CategoricalIndex(list('AB'), categories=['A', 'B', 'C'])
expect = DataFrame(df.loc[['A', 'B'], :], columns=cdf.columns,
index=exp_index)
assert_frame_equal(cdf.loc[['A', 'B'], :], expect)
exp_columns = CategoricalIndex(list('XY'),
categories=['X', 'Y', 'Z'])
expect = DataFrame(df.loc[:, ['X', 'Y']], index=cdf.index,
columns=exp_columns)
assert_frame_equal(cdf.loc[:, ['X', 'Y']], expect)
# non-unique
df = DataFrame(np.random.randn(3, 3),
index=list('ABA'), columns=list('XYX'))
cdf = df.copy()
cdf.index = CategoricalIndex(df.index)
cdf.columns = CategoricalIndex(df.columns)
exp_index = CategoricalIndex(list('AA'), categories=['A', 'B'])
expect = DataFrame(df.loc['A', :], columns=cdf.columns,
index=exp_index)
assert_frame_equal(cdf.loc['A', :], expect)
exp_columns = CategoricalIndex(list('XX'), categories=['X', 'Y'])
expect = DataFrame(df.loc[:, 'X'], index=cdf.index,
columns=exp_columns)
assert_frame_equal(cdf.loc[:, 'X'], expect)
expect = DataFrame(df.loc[['A', 'B'], :], columns=cdf.columns,
index=CategoricalIndex(list('AAB')))
assert_frame_equal(cdf.loc[['A', 'B'], :], expect)
expect = DataFrame(df.loc[:, ['X', 'Y']], index=cdf.index,
columns=CategoricalIndex(list('XXY')))
assert_frame_equal(cdf.loc[:, ['X', 'Y']], expect)
def test_read_only_source(self):
# GH 10043
rw_array = np.eye(10)
rw_df = DataFrame(rw_array)
ro_array = np.eye(10)
ro_array.setflags(write=False)
ro_df = DataFrame(ro_array)
assert_frame_equal(rw_df.iloc[[1, 2, 3]], ro_df.iloc[[1, 2, 3]])
assert_frame_equal(rw_df.iloc[[1]], ro_df.iloc[[1]])
assert_series_equal(rw_df.iloc[1], ro_df.iloc[1])
assert_frame_equal(rw_df.iloc[1:3], ro_df.iloc[1:3])
assert_frame_equal(rw_df.loc[[1, 2, 3]], ro_df.loc[[1, 2, 3]])
assert_frame_equal(rw_df.loc[[1]], ro_df.loc[[1]])
assert_series_equal(rw_df.loc[1], ro_df.loc[1])
assert_frame_equal(rw_df.loc[1:3], ro_df.loc[1:3])
def test_reindexing(self):
# reindexing
# convert to a regular index
result = self.df2.reindex(['a', 'b', 'e'])
expected = DataFrame({'A': [0, 1, 5, 2, 3, np.nan],
'B': Series(list('aaabbe'))}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
result = self.df2.reindex(['a', 'b'])
expected = DataFrame({'A': [0, 1, 5, 2, 3],
'B': Series(list('aaabb'))}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
result = self.df2.reindex(['e'])
expected = DataFrame({'A': [np.nan],
'B': Series(['e'])}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
result = self.df2.reindex(['d'])
expected = DataFrame({'A': [np.nan],
'B': Series(['d'])}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
# since we are actually reindexing with a Categorical
# then return a Categorical
cats = list('cabe')
result = self.df2.reindex(Categorical(['a', 'd'], categories=cats))
expected = DataFrame({'A': [0, 1, 5, np.nan],
'B': Series(list('aaad')).astype(
CDT(cats))}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
result = self.df2.reindex(Categorical(['a'], categories=cats))
expected = DataFrame({'A': [0, 1, 5],
'B': Series(list('aaa')).astype(
CDT(cats))}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
result = self.df2.reindex(['a', 'b', 'e'])
expected = DataFrame({'A': [0, 1, 5, 2, 3, np.nan],
'B': Series(list('aaabbe'))}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
result = self.df2.reindex(['a', 'b'])
expected = DataFrame({'A': [0, 1, 5, 2, 3],
'B': Series(list('aaabb'))}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
result = self.df2.reindex(['e'])
expected = DataFrame({'A': [np.nan],
'B': Series(['e'])}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
# give back the type of categorical that we received
result = self.df2.reindex(Categorical(
['a', 'd'], categories=cats, ordered=True))
expected = DataFrame(
{'A': [0, 1, 5, np.nan],
'B': Series(list('aaad')).astype(
CDT(cats, ordered=True))}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
result = self.df2.reindex(Categorical(
['a', 'd'], categories=['a', 'd']))
expected = DataFrame({'A': [0, 1, 5, np.nan],
'B': Series(list('aaad')).astype(
CDT(['a', 'd']))}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
# passed duplicate indexers are not allowed
pytest.raises(ValueError, lambda: self.df2.reindex(['a', 'a']))
# args NotImplemented ATM
pytest.raises(NotImplementedError,
lambda: self.df2.reindex(['a'], method='ffill'))
pytest.raises(NotImplementedError,
lambda: self.df2.reindex(['a'], level=1))
pytest.raises(NotImplementedError,
lambda: self.df2.reindex(['a'], limit=2))
def test_loc_slice(self):
# slicing
# not implemented ATM
# GH9748
pytest.raises(TypeError, lambda: self.df.loc[1:5])
# result = df.loc[1:5]
# expected = df.iloc[[1,2,3,4]]
# assert_frame_equal(result, expected)
def test_boolean_selection(self):
df3 = self.df3
df4 = self.df4
result = df3[df3.index == 'a']
expected = df3.iloc[[]]
assert_frame_equal(result, expected)
result = df4[df4.index == 'a']
expected = df4.iloc[[]]
assert_frame_equal(result, expected)
result = df3[df3.index == 1]
expected = df3.iloc[[0, 1, 3]]
assert_frame_equal(result, expected)
result = df4[df4.index == 1]
expected = df4.iloc[[0, 1, 3]]
assert_frame_equal(result, expected)
# since we have an ordered categorical
# CategoricalIndex([1, 1, 2, 1, 3, 2],
# categories=[3, 2, 1],
# ordered=True,
# name=u'B')
result = df3[df3.index < 2]
expected = df3.iloc[[4]]
assert_frame_equal(result, expected)
result = df3[df3.index > 1]
expected = df3.iloc[[]]
assert_frame_equal(result, expected)
# unordered
# cannot be compared
# CategoricalIndex([1, 1, 2, 1, 3, 2],
# categories=[3, 2, 1],
# ordered=False,
# name=u'B')
pytest.raises(TypeError, lambda: df4[df4.index < 2])
pytest.raises(TypeError, lambda: df4[df4.index > 1])
def test_indexing_with_category(self):
# https://github.com/pandas-dev/pandas/issues/12564
# consistent result if comparing as Dataframe
cat = DataFrame({'A': ['foo', 'bar', 'baz']})
exp = DataFrame({'A': [True, False, False]})
res = (cat[['A']] == 'foo')
tm.assert_frame_equal(res, exp)
cat['A'] = cat['A'].astype('category')
res = (cat[['A']] == 'foo')
tm.assert_frame_equal(res, exp)
def test_map_with_dict_or_series(self):
orig_values = ['a', 'B', 1, 'a']
new_values = ['one', 2, 3.0, 'one']
cur_index = pd.CategoricalIndex(orig_values, name='XXX')
expected = pd.CategoricalIndex(new_values,
name='XXX', categories=[3.0, 2, 'one'])
mapper = pd.Series(new_values[:-1], index=orig_values[:-1])
output = cur_index.map(mapper)
# Order of categories in output can be different
tm.assert_index_equal(expected, output)
mapper = {o: n for o, n in
zip(orig_values[:-1], new_values[:-1])}
output = cur_index.map(mapper)
# Order of categories in output can be different
tm.assert_index_equal(expected, output)
| bsd-3-clause |
amaliujia/lenskit | lenskit-integration-tests/src/it/gradle/external-algorithms/verify.py | 7 | 1551 | # LensKit, an open source recommender systems toolkit.
# Copyright 2010-2014 Regents of the University of Minnesota and contributors
# Work on LensKit has been funded by the National Science Foundation under
# grants IIS 05-34939, 08-08692, 08-12148, and 10-17697.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Verification script to make sure that all 3 algorithms produce the same output.
# Uses Pandas.
import sys
import pandas as pd
preds = pd.read_csv('predictions.csv')
algos = set(preds['Algorithm'])
preds_wide = preds.pivot_table(index=['User', 'Item', 'Rating'],
columns='Algorithm',
values='Prediction')
pred_range = preds_wide.max(1) - preds_wide.min(1)
bad = preds_wide[pred_range >= 0.001]
if len(bad) > 0:
print >>sys.stderr, "Have %d bad predictions" % (len(bad),)
print bad
sys.exit(1)
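# Editorial note (sketch, not part of the original script): after the pivot above,
# preds_wide has one row per (User, Item, Rating) triple and one column per value of
# the Algorithm field, e.g.
#
#                        AlgoA   AlgoB   AlgoC
#   User Item Rating
#   1    10   4.0       3.912   3.912   3.911
#
# so pred_range is the per-row spread across algorithms, and any row whose spread is
# 0.001 or more counts as a disagreement. The algorithm names shown here are
# placeholders, not values taken from the actual predictions file.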
| lgpl-2.1 |
jlegendary/scikit-learn | examples/applications/plot_outlier_detection_housing.py | 243 | 5577 | """
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the Boston housing data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high-dimension, as it will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data should be Gaussian distributed, yielding some biased
estimation of the data structure, but yet accurate to some extent.
The One-Class SVM algorithm does not assume any parametric form of the data
distribution and can therefore model its complex shape much better.
First example
-------------
The first example illustrates how robust covariance estimation can help
concentrating on a relevant cluster when another one exists. Here, many
observations are confounded into one and break down the empirical covariance
estimation.
Of course, some screening tools would have pointed out the presence of two
clusters (Support Vector Machines, Gaussian Mixture Models, univariate
outlier detection, ...). But had it been a high-dimensional example, none
of these could be applied that easily.
Second example
--------------
The second example shows the ability of the Minimum Covariance Determinant
robust estimator of covariance to concentrate on the main mode of the data
distribution: the location seems to be well estimated, although the covariance
is hard to estimate due to the banana-shaped distribution. Anyway, we can
get rid of some outlying observations.
The One-Class SVM is able to capture the real data structure, but the
difficulty is to adjust its kernel bandwidth parameter so as to obtain
a good compromise between the shape of the data scatter matrix and the
risk of over-fitting the data.
"""
print(__doc__)
# Author: Virgile Fritsch <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_boston
# Get data
X1 = load_boston()['data'][:, [8, 10]] # two clusters
X2 = load_boston()['data'][:, [5, 12]] # "banana"-shaped
# Define "classifiers" to be used
classifiers = {
"Empirical Covariance": EllipticEnvelope(support_fraction=1.,
contamination=0.261),
"Robust Covariance (Minimum Covariance Determinant)":
EllipticEnvelope(contamination=0.261),
"OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}
# Learn a frontier for outlier detection with several classifiers
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
plt.figure(1)
clf.fit(X1)
Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
legend1[clf_name] = plt.contour(
xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
plt.figure(2)
clf.fit(X2)
Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
Z2 = Z2.reshape(xx2.shape)
legend2[clf_name] = plt.contour(
xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])
legend1_values_list = list( legend1.values() )
legend1_keys_list = list( legend1.keys() )
# Plot the results (= shape of the data points cloud)
plt.figure(1) # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
xycoords="data", textcoords="data",
xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1_values_list[0].collections[0],
legend1_values_list[1].collections[0],
legend1_values_list[2].collections[0]),
(legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teacher ratio by town")
legend2_values_list = list( legend2.values() )
legend2_keys_list = list( legend2.keys() )
plt.figure(2) # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
plt.legend((legend2_values_list[0].collections[0],
legend2_values_list[1].collections[0],
legend2_values_list[2].collections[0]),
           (legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")
plt.show()
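# Editorial sketch (not part of the original example): besides drawing decision
# frontiers, each fitted estimator can label individual observations directly;
# predict() returns +1 for inliers and -1 for outliers. Variable names below are
# illustrative only.
clf_mcd = EllipticEnvelope(contamination=0.261).fit(X1)
outlier_labels = clf_mcd.predict(X1)
print("flagged %d of %d observations as outliers"
      % ((outlier_labels == -1).sum(), len(X1)))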
| bsd-3-clause |
GoogleCloudPlatform/keras-idiomatic-programmer | zoo/pretraining_c.py | 1 | 12034 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.keras import Sequential, Model, Input
from tensorflow.keras import layers
from tensorflow.keras.layers import ReLU, Dense, Conv2D, Conv2DTranspose
from tensorflow.keras.layers import DepthwiseConv2D, SeparableConv2D, Dropout
from tensorflow.keras.layers import GlobalAveragePooling2D, Activation, BatchNormalization
from tensorflow.keras.regularizers import l2
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.compat.v1.keras.initializers import glorot_uniform, he_normal
from tensorflow.keras.callbacks import LearningRateScheduler
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import to_categorical
import tensorflow_datasets as tfds
import tensorflow.keras.backend as K
import numpy as np
from sklearn.model_selection import train_test_split
import random
import math
import sys, os, json
class Pretraining(object):
''' Pretraining base (super) class for Composable Models '''
def __init__(self):
""" Constructor
"""
pass
###
# Pre-Training
###
# training variables
w_lr = 0 # target warmup rate
w_epochs = 0 # number of epochs in warmup
def init_draw(self, x_train=None, y_train=None, ndraws=5, epochs=3, steps=350, lr=1e-06,
batch_size=32, metric='loss', early=False, save=None):
""" Use the lottery ticket principle to find the best weight initialization
x_train : training images
y_train : training labels
ndraws : number of draws to find the winning lottery ticket
epochs : number of trial epochs
steps : number of steps per epoch
lr : tiny learning rate
batch_size: batch size
metric : metric used for determining best draw
early : whether to early stop when best draw found
save : file to save initialized weights to
"""
print("\n*** Initialize Draw")
if x_train is None:
x_train = self.x_train
y_train = self.y_train
loss = sys.float_info.max
acc = 0
w_best = None
# previous values
prev = None
p_draws = 0
if save is not None:
for path in [ save, save + '/init']:
try:
os.mkdir(path)
except:
pass
if os.path.exists(save + '/init/best.json'):
with open(save + '/init/best.json', 'r') as f:
data = json.load(f)
loss = float(data['loss'])
acc = float(data['acc'])
p_draws = int(data['ndraws'])
self.model.load_weights(save + '/init/chkpt')
w_best = self.model.get_weights()
print("Previous best, loss =", loss, 'acc = ', acc)
try:
prev = [ data['prev'], { 'loss': loss, 'acc': acc, 'ndraws': p_draws } ]
except:
prev = { 'loss': loss, 'acc': acc, 'ndraws': p_draws }
for _ in range(ndraws):
self.model = tf.keras.models.clone_model(self.model)
self.compile(optimizer=Adam(lr))
w = self.model.get_weights()
# Create generator for training in steps
datagen = ImageDataGenerator()
print("\n*** Lottery", _ + 1, "of", ndraws)
self.model.fit(datagen.flow(x_train, y_train, batch_size=batch_size),
epochs=epochs, steps_per_epoch=steps, verbose=1)
# Next Best
d_loss = self.model.history.history['loss'][epochs-1]
d_acc = self.model.history.history['acc'][epochs-1]
if d_loss < loss:
loss = d_loss
acc = d_acc
w_best = self.model.get_weights()
print("\n*** Current Best:", metric, loss)
if early:
ndraws = _ + 1
break
if save is not None:
self._save_best(save, loss, acc, p_draws + _ + 1, epochs, steps, prev)
# Set the best
if w_best is not None:
self.model.set_weights(w_best)
# Save the initialized weights
if save is not None:
self._save_best(save, loss, acc, p_draws + ndraws, epochs, steps, prev)
print("\n*** Selected Draw:", metric, loss)
def _save_best(self, save, loss, acc, ndraws, epochs, steps, prev=None):
""" Save current best weights
        save : directory to save weights
loss : metric information
acc : metric information
ndraws: total number of draws
epochs: number of epochs
steps : number of steps per epoch
prev : previous results
"""
# Late Resetting
self.model.save_weights(save + '/init/chkpt')
with open(save + "/init/best.json", "w") as f:
if prev is None:
data = {'loss': loss, 'acc': acc, 'ndraws': ndraws, 'epochs': epochs, 'steps': steps}
else:
data = {'loss': loss, 'acc': acc, 'ndraws': ndraws, 'epochs': epochs, 'steps': steps, 'prev': prev}
data = json.dumps(data)
f.write(data)
def warmup_scheduler(self, epoch, lr):
""" learning rate schedular for warmup training
epoch : current epoch iteration
lr : current learning rate
"""
if epoch == 0:
return lr
if epoch == 2:
# loss is diverging
if self.model.history.history['loss'][1] > self.model.history.history['loss'][0]:
print("*** Loss is diverging, Reducing Warmnup Rate")
self.w_lr /= 10
return epoch * self.w_lr / self.w_epochs
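    # Worked example for the scheduler above (editorial note): with the warmup()
    # defaults s_lr=1e-6, e_lr=0.001 and epochs=5, w_lr is roughly 1e-3, so epoch 3
    # trains at about 3 * 1e-3 / 5 = 6e-4, ramping linearly toward e_lr.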
def warmup(self, x_train=None, y_train=None, epochs=5, batch_size=32, s_lr=1e-6, e_lr=0.001,
loss='categorical_crossentropy', metrics=['acc'], save=None):
""" Warmup for numerical stability
x_train : training images
y_train : training labels
epochs : number of epochs for warmup
batch_size: batch size
s_lr : start warmup learning rate
e_lr : end warmup learning rate
loss : loss function
metrics : training metrics to report
save : file to save warmup weights
"""
print("\n*** Warmup (for numerical stability)")
if x_train is None:
x_train = self.x_train
y_train = self.y_train
# Load selected weight initialization draw
if save is not None:
for path in [ save, save + '/warmup']:
try:
os.mkdir(path)
except:
pass
if os.path.exists(save + '/init/chkpt.index'):
self.model.load_weights(save + '/init/chkpt')
print("Load weights from Lottery Draw initialization")
# Setup learning rate scheduler
self.compile(optimizer=Adam(s_lr), loss=loss, metrics=metrics)
lrate = LearningRateScheduler(self.warmup_scheduler, verbose=1)
self.w_epochs = epochs
self.w_lr = e_lr - s_lr
# Train the model
self.model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size, verbose=1,
callbacks=[lrate])
if save is not None:
self.model.save_weights(save + '/warmup/chkpt')
with open(save + '/warmup/hp.json', 'w') as f:
data = {'s_lr': s_lr, 'e_lr': e_lr, 'epochs': epochs }
json.dump(data, f)
def pretext(self, x_train= None, zigsaw=9, epochs=10, batch_size=32, lr=0.001,
loss='mse', metrics=['mse'], save=None):
""" Pretrain using unsupervised pre-text task for zigsaw puzzle to learn essential features
x_train : training images
zigsaw : number of tiles in zigsaw puzzle
epochs : number of epochs for pretext task training
batch_size: batch size
lr : pre-text learning rate
loss : loss function
metrics : training metrics to report
save : file to save pretext weights
"""
print("\n*** Pretext Task (for essential features)")
if x_train is None:
x_train = self.x_train
# Load selected weight after hypertune
if save is not None:
for path in [ save, save + '/pretext']:
try:
os.mkdir(path)
except:
pass
if os.path.exists(save + '/tune/chkpt.index'):
self.model.load_weights(save + '/tune/chkpt')
elif os.path.exists(save + '/warmup/chkpt.index'):
self.model.load_weights(save + '/warmup/chkpt')
elif os.path.exists(save + '/init/chkpt.index'):
self.model.load_weights(save + '/init/chkpt')
if lr is None:
with open(save + '/tune/hp.json') as f:
data = json.load(f)
lr = data['lr']
batch_size = data['bs']
# Get the pooling layer before the final dense output layer
pooling = self.model.layers[len(self.model.layers)-2].output
# Attach a new top for the zigsaw puzzle
outputs = self.Dense(pooling, zigsaw)
self.relu = zigsaw
outputs = self.ReLU(outputs)
# Construct wrapper model with the new top layer
wrapper = Model(self.model.inputs, outputs)
wrapper.compile(loss=loss, optimizer=Adam(lr=lr), metrics=metrics)
# Rows/Columns
R = x_train.shape[1]
C = x_train.shape[2]
# Slicing
if zigsaw == 4:
M = int(x_train.shape[1] / 2)
N = int(x_train.shape[2] / 2)
ix = [0, 1, 2, 3]
elif zigsaw == 9:
M = int(x_train.shape[1] / 3)
N = int(x_train.shape[2] / 3)
            ix = [0, 1, 2, 3, 4, 5, 6, 7, 8]  # one index per tile of the 3x3 grid
px_train = []
py_train = []
for _ in range(len(x_train)):
tiles = [x_train[_][x:x+M,y:y+N] for x in range(0,R,M) for y in range(0,C,N)]
random.shuffle(ix)
if zigsaw == 4:
r1 = np.concatenate((tiles[ix[0]], tiles[ix[1]]))
r2 = np.concatenate((tiles[ix[2]], tiles[ix[3]]))
image = np.concatenate((r1, r2), axis=1)
else:
r1 = np.concatenate((tiles[ix[0]], tiles[ix[1]], tiles[ix[2]]))
r2 = np.concatenate((tiles[ix[3]], tiles[ix[4]], tiles[ix[5]]))
r3 = np.concatenate((tiles[ix[6]], tiles[ix[7]], tiles[ix[8]]))
image = np.concatenate((r1, r2, r3), axis=1)
px_train.append(image)
            py_train.append(list(ix))  # copy: ix is shuffled in place on the next pass
px_train = np.asarray(px_train)
py_train = np.asarray(py_train)
# Train the model
wrapper.fit(px_train, py_train, epochs=epochs, batch_size=batch_size, verbose=1)
if save is not None:
self.model.save_weights(save + '/pretext/chkpt')
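# Editorial usage sketch (not part of the original file): Pretraining is written as a
# mixin for a model wrapper that supplies self.model, self.compile(), self.Dense(),
# self.ReLU() and the self.x_train / self.y_train arrays used above. On such a
# (hypothetical) wrapper the intended sequence is roughly:
#
#     wrapper.init_draw(ndraws=5, save='ckpt')   # lottery-ticket style weight draw
#     wrapper.warmup(epochs=5, save='ckpt')      # linear learning-rate ramp-up
#     wrapper.pretext(zigsaw=9, save='ckpt')     # unsupervised jigsaw pretext task
#
# Method names and defaults come from the definitions above; the wrapper object and
# the 'ckpt' directory are assumptions for illustration.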
| apache-2.0 |
mblondel/scikit-learn | sklearn/feature_selection/tests/test_base.py | 170 | 3666 | import numpy as np
from scipy import sparse as sp
from nose.tools import assert_raises, assert_equal
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import check_array
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = check_array(X, 'csc')
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
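# Editorial note (not part of the original tests): these fixtures encode the
# SelectorMixin contract exercised below - with step=2 the selector keeps columns
# 0, 2, 4, ..., and inverse_transform restores the original width with zeros (or
# empty strings for the feature-name case) in the dropped positions.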
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_t_actual = sel.transform(feature_names)
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform(feature_names_t)
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
| bsd-3-clause |
datapythonista/pandas | pandas/core/internals/__init__.py | 3 | 1603 | from pandas.core.internals.api import make_block # pseudo-public version
from pandas.core.internals.array_manager import (
ArrayManager,
SingleArrayManager,
)
from pandas.core.internals.base import (
DataManager,
SingleDataManager,
)
from pandas.core.internals.blocks import ( # io.pytables, io.packers
Block,
DatetimeTZBlock,
ExtensionBlock,
NumericBlock,
ObjectBlock,
)
from pandas.core.internals.concat import concatenate_managers
from pandas.core.internals.managers import (
BlockManager,
SingleBlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks,
)
__all__ = [
"Block",
"CategoricalBlock",
"NumericBlock",
"DatetimeTZBlock",
"ExtensionBlock",
"ObjectBlock",
"make_block",
"DataManager",
"ArrayManager",
"BlockManager",
"SingleDataManager",
"SingleBlockManager",
"SingleArrayManager",
"concatenate_managers",
# those two are preserved here for downstream compatibility (GH-33892)
"create_block_manager_from_arrays",
"create_block_manager_from_blocks",
]
def __getattr__(name: str):
import warnings
if name == "CategoricalBlock":
warnings.warn(
"CategoricalBlock is deprecated and will be removed in a future version. "
"Use ExtensionBlock instead.",
DeprecationWarning,
stacklevel=2,
)
from pandas.core.internals.blocks import CategoricalBlock
return CategoricalBlock
raise AttributeError(f"module 'pandas.core.internals' has no attribute '{name}'")
| bsd-3-clause |
isomerase/mozziesniff | roboskeeter/io/i_o.py | 2 | 6947 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 21 17:21:42 2015
@author: richard
"""
import os
import string
from Tkinter import Tk
from tkFileDialog import askdirectory
import numpy as np
import pandas as pd
def load_single_csv_to_df(csv_dir):
"""
Parameters
----------
csv_dir
str, full directory of csv
Returns
-------
pandas df
"""
col_labels = [ # TODO: check that Sharri's kinematics are the same as your kinematics
'position_x',
'position_y',
'position_z',
'velocity_x',
'velocity_y',
'velocity_z',
'acceleration_x',
'acceleration_y',
'acceleration_z',
'heading_angleS',
'angular_velo_xyS',
'angular_velo_yzS',
'curvatureS'
]
# map string "NaN" to np.nan
# header=None is needed to make sure dtype float is assigned properly, apparently
dataframe = pd.read_csv(csv_dir, na_values="NaN", names=col_labels, header=None, dtype=np.float32)
dataframe.fillna(value=0, inplace=True) # TODO: there shouldn't be NaNs in data
df_len = len(dataframe.position_x)
# take fname number
fname = os.path.split(csv_dir)[1]
fname_num = extract_number_from_fname(fname)
dataframe['trajectory_num'] = [fname_num] * df_len
dataframe['tsi'] = np.arange(df_len)
# plume related stuff will get set inside Experiment()
return dataframe
def experiment_condition_to_DF(experimental_condition):
"""
Given an experimental condition, load the appropriate dataset into a dataframe.
Parameters
----------
experimental_condition
(string)
Control, Left, or Right, or list thereof
Returns
-------
df
"""
if type(experimental_condition) is str:
experimental_condition = [experimental_condition]
experimental_condition = [string.upper(i) for i in experimental_condition]
df_list = []
for condition in experimental_condition:
dir_label = "EXP_TRAJECTORIES_" + condition
directory = get_directory(dir_label)
for fname in [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]: # list files
print "Loading {} from {}".format(fname, directory)
file_path = os.path.join(directory, fname)
dataframe = load_single_csv_to_df(file_path)
df_list.append(dataframe)
df = pd.concat(df_list)
return df
def get_csv_name_list(path, relative=True):
if relative:
return os.listdir(os.path.join(os.path.realpath('.'), path))
else:
return os.listdir(path)
def get_csv_filepath_list(path, csv_list):
"""
Parameters
----------
path
csv_list
Returns
-------
"""
paths = [os.path.join(path, fname) for fname in csv_list]
return paths
def get_directory(selection=None):
"""Centralized func to define directories, or select using dialog box
In:
Selection
None, open dialog box
PROJECT_PATH = os.path.dirname(trajectory.__file__)
MODEL_PATH = os.path.join(PROJECT_PATH, 'data', 'model')
EXPERIMENT_PATH = os.path.join(PROJECT_PATH, 'data', 'experiments')
CONTROL_EXP_PATH = os.path.join(EXPERIMENT_PATH, 'control_processed_and_filtered')
Out:
directory path
"""
dirname = os.path.dirname
PROJECT_PATH = dirname(dirname(dirname(__file__)))
EXPERIMENTS_PATH = os.path.join(PROJECT_PATH, 'data', 'experiments')
MODEL_PATH = os.path.join(PROJECT_PATH, 'data', 'model')
EXPERIMENTAL_TRAJECTORIES = os.path.join(EXPERIMENTS_PATH, 'trajectories')
EXP_TRAJECTORIES_CONTROL = os.path.join(EXPERIMENTAL_TRAJECTORIES, 'control')
EXP_TRAJECTORIES_LEFT = os.path.join(EXPERIMENTAL_TRAJECTORIES, 'left')
EXP_TRAJECTORIES_RIGHT = os.path.join(EXPERIMENTAL_TRAJECTORIES, 'right')
TEMPERATURES_PATH = os.path.join(EXPERIMENTS_PATH, 'temperature')
RAW = os.path.join(TEMPERATURES_PATH, 'raw-data')
TIMEAVG = os.path.join(TEMPERATURES_PATH, 'timeavg-data')
VAR = os.path.join(TEMPERATURES_PATH, 'variance-model')
BOOL = os.path.join(TEMPERATURES_PATH, 'boolean-model')
VAR_LEFT_CSV = os.path.join(VAR, 'left', 'LeftplumeVar_nonan.csv')
VAR_RIGHT_CSV = os.path.join(VAR, 'right', 'RightplumeVar_nonan.csv')
THERMOCOUPLE_RAW_LEFT_CSV = os.path.join(RAW, 'left', 'raw_left.csv')
THERMOCOUPLE_RAW_RIGHT_CSV = os.path.join(RAW, 'right', 'raw_right.csv')
THERMOCOUPLE_TIMEAVG_LEFT_CSV = os.path.join(TIMEAVG, 'left', 'timeavg_left.csv')
THERMOCOUPLE_TIMEAVG_LEFT_PADDED_CSV = os.path.join(TIMEAVG, 'left', 'timeavg_left_padded.csv')
THERMOCOUPLE_TIMEAVG_LEFT_INTERPOLATED_CSV = os.path.join(TIMEAVG, 'left', 'timeavg_left_interpolated.csv')
THERMOCOUPLE_TIMEAVG_RIGHT_CSV = os.path.join(TIMEAVG, 'right', 'timeavg_right.csv')
THERMOCOUPLE_TIMEAVG_RIGHT_PADDED_CSV = os.path.join(TIMEAVG, 'right', 'timeavg_right_padded.csv')
THERMOCOUPLE_TIMEAVG_RIGHT_INTERPOLATED_CSV = os.path.join(TIMEAVG, 'right', 'timeavg_right_interpolated.csv')
BOOL_LEFT_CSV = os.path.join(BOOL, 'left', 'left_plume_bounds.csv')
BOOL_RIGHT_CSV = os.path.join(BOOL, 'right', 'right_plume_bounds.csv')
dirs = {
'PROJECT_PATH': PROJECT_PATH,
'MODEL_PATH': MODEL_PATH,
'EXPERIMENT_PATH': EXPERIMENTS_PATH,
'EXPERIMENTAL_TRAJECTORIES': EXPERIMENTAL_TRAJECTORIES,
'EXP_TRAJECTORIES_CONTROL': EXP_TRAJECTORIES_CONTROL,
'EXP_TRAJECTORIES_LEFT': EXP_TRAJECTORIES_LEFT,
'EXP_TRAJECTORIES_RIGHT': EXP_TRAJECTORIES_RIGHT,
'THERMOCOUPLE_RAW_LEFT': THERMOCOUPLE_RAW_LEFT_CSV,
'THERMOCOUPLE_TIMEAVG_LEFT_PADDED_CSV': THERMOCOUPLE_TIMEAVG_LEFT_PADDED_CSV,
'THERMOCOUPLE_TIMEAVG_LEFT_INTERPOLATED_CSV': THERMOCOUPLE_TIMEAVG_LEFT_INTERPOLATED_CSV,
'THERMOCOUPLE_RAW_RIGHT': THERMOCOUPLE_RAW_RIGHT_CSV,
'THERMOCOUPLE_TIMEAVG_RIGHT_PADDED_CSV': THERMOCOUPLE_TIMEAVG_RIGHT_PADDED_CSV,
'THERMOCOUPLE_TIMEAVG_RIGHT_INTERPOLATED_CSV': THERMOCOUPLE_TIMEAVG_RIGHT_INTERPOLATED_CSV,
'THERMOCOUPLE_TIMEAVG_LEFT_CSV': THERMOCOUPLE_TIMEAVG_LEFT_CSV,
'THERMOCOUPLE_TIMEAVG_RIGHT_CSV': THERMOCOUPLE_TIMEAVG_RIGHT_CSV,
'BOOL_LEFT_CSV': BOOL_LEFT_CSV,
'BOOL_RIGHT_CSV': BOOL_RIGHT_CSV,
'VAR_LEFT_CSV': VAR_LEFT_CSV,
'VAR_RIGHT_CSV': VAR_RIGHT_CSV
}
if selection is None:
print("Enter directory with experimental data")
Tk().withdraw()
directory = askdirectory()
else:
directory = dirs[selection]
print("Directory selected: {}".format(directory))
return directory
def extract_number_from_fname(token):
extract_digits = lambda stng: "".join(char for char in stng if char in string.digits)
to_int = lambda x: int(float(x)) if x.count(".") <= 1 else None
number = to_int(extract_digits(token))
return number
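# Editorial note (not part of the original module): extract_number_from_fname keeps
# every digit in the token before converting, so "trajectory_123.csv" yields 123,
# while a name containing several separate numbers (e.g. "run2_trial7.csv") would
# collapse to 27 rather than either individual number.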
| mit |
chibill/EMCP | runtime/lib/tqdm/_tqdm_pandas.py | 5 | 1700 | # future division is important to divide integers and get as
# a result precise floating numbers (instead of truncated int)
from __future__ import absolute_import
__author__ = "github.com/casperdcl"
__all__ = ['tqdm_pandas']
def tqdm_pandas(t): # pragma: no cover
"""
Registers the given `tqdm` instance with
`pandas.core.groupby.DataFrameGroupBy.progress_apply`.
It will even close() the `tqdm` instance upon completion.
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from tqdm import tqdm, tqdm_pandas
>>>
>>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
>>> tqdm_pandas(tqdm()) # can use tqdm_gui, optional kwargs, etc
>>> # Now you can use `progress_apply` instead of `apply`
>>> df.groupby(0).progress_apply(lambda x: x**2)
References
----------
https://stackoverflow.com/questions/18603270/
progress-indicator-during-pandas-operations-python
"""
from pandas.core.groupby import DataFrameGroupBy
def inner(groups, func, *args, **kwargs):
"""
Parameters
----------
groups : DataFrameGroupBy
Grouped data.
func : function
To be applied on the grouped data.
*args and *kwargs are transmitted to DataFrameGroupBy.apply()
"""
t.total = len(groups) + 1 # pandas calls update once too many
def wrapper(*args, **kwargs):
t.update()
return func(*args, **kwargs)
result = groups.apply(wrapper, *args, **kwargs)
t.close()
return result
# Enable custom tqdm progress in pandas!
DataFrameGroupBy.progress_apply = inner
| gpl-3.0 |
antoinecarme/pyaf | pyaf/TS/Scikit_Models.py | 1 | 10386 | import numpy as np
import pandas as pd
from . import SignalDecomposition_AR as tsar
from . import Utils as tsutil
import sys
class cAbstract_Scikit_Model(tsar.cAbstractAR):
def __init__(self , cycle_residue_name, P , iExogenousInfo = None):
super().__init__(cycle_residue_name, iExogenousInfo)
self.mNbLags = P;
self.mNbExogenousLags = P;
self.mScikitModel = None;
def dumpCoefficients(self, iMax=10):
# print(self.mScikitModel.__dict__);
pass
def build_Scikit_Model(self):
assert(0);
def set_name(self):
assert(0);
def is_used(self, name):
if(self.mFeatureSelector):
return (name in self.mInputNamesAfterSelection)
return True
def fit(self):
# print("ESTIMATE_SCIKIT_MODEL_START" , self.mCycleResidueName);
self.build_Scikit_Model();
self.set_name();
series = self.mCycleResidueName;
self.mTime = self.mTimeInfo.mTime;
self.mSignal = self.mTimeInfo.mSignal;
lAREstimFrame = self.mSplit.getEstimPart(self.mARFrame)
# print("mAREstimFrame columns :" , self.mAREstimFrame.columns);
lARInputs = lAREstimFrame[self.mInputNames].values
lARTarget = lAREstimFrame[series].values
# print(len(self.mInputNames), lARInputs.shape , lARTarget.shape)
assert(lARInputs.shape[1] > 0);
assert(lARTarget.shape[0] > 0);
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_regression
lMaxFeatures = self.mOptions.mMaxFeatureForAutoreg;
if(lMaxFeatures >= lARInputs.shape[1]):
lMaxFeatures = lARInputs.shape[1];
if(lMaxFeatures >= (lARInputs.shape[0] // 4)):
lMaxFeatures = lARInputs.shape[0] // 4;
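        # Editorial note: the two caps above keep the SelectKBest k at the smallest of
        # the configured mMaxFeatureForAutoreg, the number of lag inputs and a quarter
        # of the estimation rows - e.g. 10 requested, 40 inputs, 2000 rows gives k = 10.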
self.mFeatureSelector = SelectKBest(f_regression, k= lMaxFeatures);
try:
self.mFeatureSelector.fit(lARInputs, lARTarget);
except Exception as e:
print("SCIKIT_MODEL_FEATURE_SELECTION_FAILURE" , self.mOutName, lARInputs.shape, e);
if(self.mOptions.mDebug):
df1 = pd.DataFrame(lARInputs);
df1.columns = self.mInputNames
df1['TGT'] = lARTarget;
# df1.to_csv("SCIKIT_MODEL_FEATURE_SELECTION_FAILURE.csv.gz" , compression='gzip');
# issue #72 : ignore feature selection in case of failure.
self.mFeatureSelector = None
if(self.mFeatureSelector):
lARInputsAfterSelection = self.mFeatureSelector.transform(lARInputs);
# print(self.mInputNames , self.mFeatureSelector.get_support(indices=True));
lSupport = self.mFeatureSelector.get_support(indices=True);
self.mInputNamesAfterSelection = [self.mInputNames[k] for k in lSupport];
else:
lARInputsAfterSelection = lARInputs;
self.mInputNamesAfterSelection = self.mInputNames;
self.mComplexity = len(self.mInputNamesAfterSelection)
assert(len(self.mInputNamesAfterSelection) == lARInputsAfterSelection.shape[1]);
# print("FEATURE_SELECTION" , self.mOutName, lARInputs.shape[1] , lARInputsAfterSelection.shape[1]);
del lARInputs;
try:
self.mScikitModel.fit(lARInputsAfterSelection, lARTarget)
except Exception as e:
print("SCIKIT_MODEL_FIT_FAILURE" , self.mOutName, lARInputsAfterSelection.shape, e);
if(self.mOptions.mDebug):
df1 = pd.DataFrame(lARInputsAfterSelection);
df1.columns = self.mInputNamesAfterSelection
df1['TGT'] = lARTarget;
# df1.to_csv("SCIKIT_MODEL_FIT_FAILURE.csv.gz" , compression='gzip');
del self.mScikitModel
self.mScikitModel = None;
del lARInputsAfterSelection;
del lARTarget;
del lAREstimFrame;
if(self.mScikitModel is not None):
lFullARInputs = self.mARFrame[self.mInputNames].values;
lFullARInputsAfterSelection = self.mFeatureSelector.transform(lFullARInputs) if self.mFeatureSelector else lFullARInputs;
lPredicted = self.mScikitModel.predict(lFullARInputsAfterSelection);
self.mARFrame[self.mOutName] = lPredicted
else:
# issue_34 failure SVD does not converge
self.mARFrame[self.mOutName] = self.mDefaultValues[series]
self.mARFrame[self.mOutName + '_residue'] = self.mARFrame[series] - self.mARFrame[self.mOutName]
# print("ESTIMATE_SCIKIT_MODEL_END" , self.mOutName);
def transformDataset(self, df, horizon_index = 1):
series = self.mCycleResidueName;
if(self.mExogenousInfo is not None):
df = self.mExogenousInfo.transformDataset(df);
# print(df.columns);
# print(df.info());
# print(df.head());
# print(df.tail());
lag_df = self.generateLagsForForecast(df);
# print(self.mInputNames);
# print(self.mFormula, "\n", lag_df.columns);
# lag_df.to_csv("LAGGED_ " + str(self.mNbLags) + ".csv");
# print(len(list(lag_df.columns)) , len(self.mInputNamesAfterSelection))
inputs_after_feat_selection = lag_df.values[:,1:] # the first column is the signal
# inputs_after_feat_selection = self.mFeatureSelector.transform(inputs) if self.mFeatureSelector else inputs;
if(self.mScikitModel is not None):
pred = self.mScikitModel.predict(inputs_after_feat_selection)
df[self.mOutName] = pred;
else:
df[self.mOutName] = self.mDefaultValues[series];
target = df[series].values
df[self.mOutName + '_residue'] = target - df[self.mOutName].values
return df;
class cAutoRegressiveModel(cAbstract_Scikit_Model):
def __init__(self , cycle_residue_name, P , iExogenousInfo = None):
super().__init__(cycle_residue_name, P, iExogenousInfo)
self.mComplexity = P;
def dumpCoefficients(self, iMax=10):
logger = tsutil.get_pyaf_logger();
lDict = dict(zip(self.mInputNamesAfterSelection , self.mScikitModel.coef_));
lDict1 = dict(zip(self.mInputNamesAfterSelection , abs(self.mScikitModel.coef_)));
i = 1;
lOrderedVariables = sorted(lDict1.keys(), key=lDict1.get, reverse=True);
for k in lOrderedVariables[0:iMax]:
logger.info("AR_MODEL_COEFF " + str(i) + " " + str(k) + " " + str(lDict[k]));
i = i + 1;
def build_Scikit_Model(self):
import sklearn.linear_model as linear_model
# issue_22 : warning about singular matrix => change the solver by default.
self.mScikitModel = linear_model.Ridge(solver='svd')
def set_name(self):
self.mOutName = self.mCycleResidueName + '_AR(' + str(self.mNbLags) + ")";
self.mFormula = "AR" # (" + str(self.mNbLags) + ")";
if(self.mExogenousInfo is not None):
self.mOutName = self.mCycleResidueName + '_ARX(' + str(self.mNbLags) + ")";
self.mFormula = "ARX" # (" + str(self.mNbExogenousLags) + ")";
class cSVR_Model(cAbstract_Scikit_Model):
def __init__(self , cycle_residue_name, P , iExogenousInfo = None):
super().__init__(cycle_residue_name, P, iExogenousInfo)
self.mComplexity = 2*P;
def dumpCoefficients(self, iMax=10):
pass
def build_Scikit_Model(self):
import sklearn.svm as svm
self.mScikitModel = svm.SVR(kernel='rbf', gamma='scale')
def set_name(self):
self.mOutName = self.mCycleResidueName + '_SVR(' + str(self.mNbLags) + ")";
self.mFormula = "SVR" # (" + str(self.mNbLags) + ")";
if(self.mExogenousInfo is not None):
self.mOutName = self.mCycleResidueName + '_SVRX(' + str(self.mNbLags) + ")";
self.mFormula = "SVRX" # (" + str(self.mNbExogenousLags) + ")";
class cXGBoost_Model(cAbstract_Scikit_Model):
def __init__(self , cycle_residue_name, P , iExogenousInfo = None):
super().__init__(cycle_residue_name, P, iExogenousInfo)
self.mComplexity = 2*P;
def dumpCoefficients(self, iMax=10):
pass
def get_default_xgb_options(self):
lXGBOptions = dict(n_estimators=10,
nthread=1,
min_child_weight=10,
max_depth=3,
seed=self.mOptions.mSeed)
return lXGBOptions
def build_Scikit_Model(self):
import xgboost as xgb
lXGBOptions = self.mOptions.mXGBOptions;
if(lXGBOptions is None):
lXGBOptions = self.get_default_xgb_options()
self.mScikitModel = xgb.XGBRegressor(**lXGBOptions)
def set_name(self):
self.mOutName = self.mCycleResidueName + '_XGB(' + str(self.mNbLags) + ")";
self.mFormula = "XGB" # + str(self.mNbLags) + ")";
if(self.mExogenousInfo is not None):
self.mOutName = self.mCycleResidueName + '_XGBX(' + str(self.mNbLags) + ")";
self.mFormula = "XGBX" # (" + str(self.mNbExogenousLags) + ")";
class cLightGBM_Model(cAbstract_Scikit_Model):
def __init__(self , cycle_residue_name, P , iExogenousInfo = None):
super().__init__(cycle_residue_name, P, iExogenousInfo)
self.mComplexity = 2*P;
def dumpCoefficients(self, iMax=10):
pass
def get_default_lgbm_options(self):
lLGBMOptions = dict(objective='regression',
n_estimators=32,
random_state=self.mOptions.mSeed)
return lLGBMOptions
def build_Scikit_Model(self):
import lightgbm as lgb
lLGBMOptions = self.mOptions.mLGBMOptions;
if(lLGBMOptions is None):
lLGBMOptions = self.get_default_lgbm_options()
self.mScikitModel = lgb.LGBMRegressor(**lLGBMOptions)
def set_name(self):
self.mOutName = self.mCycleResidueName + '_LGB(' + str(self.mNbLags) + ")";
self.mFormula = "LGB" # + str(self.mNbLags) + ")";
if(self.mExogenousInfo is not None):
self.mOutName = self.mCycleResidueName + '_LGBX(' + str(self.mNbLags) + ")";
self.mFormula = "LGBX" # (" + str(self.mNbExogenousLags) + ")";
| bsd-3-clause |
damaggu/SAMRI | samri/pipelines/preprocess.py | 1 | 17941 | from os import path, listdir, getcwd, remove
from samri.pipelines.extra_functions import get_data_selection, get_scan, write_bids_metadata_file, write_events_file, force_dummy_scans, BIDS_METADATA_EXTRACTION_DICTS
import re
import inspect
import shutil
from copy import deepcopy
from itertools import product
import nipype.interfaces.ants as ants
import nipype.interfaces.io as nio
import nipype.interfaces.utility as util # utility
import nipype.pipeline.engine as pe # pypeline engine
import pandas as pd
from nipype.interfaces import afni, bru2nii, fsl, nipy
from samri.pipelines.nodes import *
from samri.pipelines.utils import bids_naming, ss_to_path, sss_filename, fslmaths_invert_values
from samri.utilities import N_PROCS
from samri.fetch.templates import fetch_rat_waxholm, fetch_mouse_DSURQE
DUMMY_SCANS=10
N_PROCS=max(N_PROCS-4, 2)
#set all outputs to compressed NIfTI
afni.base.AFNICommand.set_default_output_type('NIFTI_GZ')
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
def bruker(measurements_base, template,
		debug=False,
exclude={},
functional_match={},
structural_match={},
sessions=[],
subjects=[],
actual_size=True,
functional_blur_xy=False,
functional_registration_method="structural",
highpass_sigma=225,
lowpass_sigma=None,
negative_contrast_agent=False,
n_procs=N_PROCS,
realign="time",
registration_mask=False,
tr=1,
very_nasty_bruker_delay_hack=False,
workflow_name="generic",
keep_work=False,
autorotate=False,
strict=False,
verbose=False,
):
'''
realign: {"space","time","spacetime",""}
	Parameter that dictates slice-timing correction and realignment of slices. "time" (FSL.SliceTimer) is the default, since it works safely. Use the others only with caution!
'''
if template:
if template == "mouse":
template = fetch_mouse_DSURQE()['template']
registration_mask = fetch_mouse_DSURQE()['mask']
elif template == "rat":
template = fetch_rat_waxholm()['template']
registration_mask = fetch_rat_waxholm()['mask']
else:
pass
else:
raise ValueError("No species or template specified")
measurements_base = path.abspath(path.expanduser(measurements_base))
	# add subject and session filters if present (applied to the match
	# dictionaries that get_data_selection() consumes below)
	if subjects:
		structural_match['subject'] = subjects
		functional_match['subject'] = subjects
	if sessions:
		structural_match['session'] = sessions
		functional_match['session'] = sessions
# define measurement directories to be processed, and populate the list either with the given include_measurements, or with an intelligent selection
data_selection = pd.DataFrame([])
if structural_match:
s_data_selection = get_data_selection(measurements_base,
match=structural_match,
exclude=exclude,
)
structural_scan_types = s_data_selection['scan_type'].unique()
data_selection = pd.concat([data_selection,s_data_selection])
if functional_match:
f_data_selection = get_data_selection(measurements_base,
match=functional_match,
exclude=exclude,
)
functional_scan_types = f_data_selection['scan_type'].unique()
data_selection = pd.concat([data_selection,f_data_selection])
# we currently only support one structural scan type per session
#if functional_registration_method in ("structural", "composite") and structural_scan_types:
# structural_scan_types = [structural_scan_types[0]]
# we start to define nipype workflow elements (nodes, connections, meta)
subjects_sessions = data_selection[["subject","session"]].drop_duplicates().values.tolist()
if debug:
print('Data selection:')
print(data_selection)
print('Iterating over:')
print(subjects_sessions)
infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_session'], mandatory_inputs=False), name="infosource")
infosource.iterables = [('subject_session', subjects_sessions)]
get_f_scan = pe.Node(name='get_f_scan', interface=util.Function(function=get_scan,input_names=inspect.getargspec(get_scan)[0], output_names=['scan_path','scan_type','trial']))
if not strict:
get_f_scan.inputs.ignore_exception = True
get_f_scan.inputs.data_selection = data_selection
get_f_scan.inputs.measurements_base = measurements_base
get_f_scan.iterables = ("scan_type", functional_scan_types)
f_bru2nii = pe.Node(interface=bru2nii.Bru2(), name="f_bru2nii")
f_bru2nii.inputs.actual_size=actual_size
dummy_scans = pe.Node(name='dummy_scans', interface=util.Function(function=force_dummy_scans,input_names=inspect.getargspec(force_dummy_scans)[0], output_names=['out_file']))
dummy_scans.inputs.desired_dummy_scans = DUMMY_SCANS
bandpass = pe.Node(interface=fsl.maths.TemporalFilter(), name="bandpass")
bandpass.inputs.highpass_sigma = highpass_sigma
if lowpass_sigma:
bandpass.inputs.lowpass_sigma = lowpass_sigma
else:
bandpass.inputs.lowpass_sigma = tr
#bids_filename = pe.Node(name='bids_filename', interface=util.Function(function=sss_filename,input_names=inspect.getargspec(sss_filename)[0], output_names=['filename']))
bids_filename = pe.Node(name='bids_filename', interface=util.Function(function=bids_naming,input_names=inspect.getargspec(bids_naming)[0], output_names=['filename']))
bids_filename.inputs.metadata = data_selection
#bids_stim_filename = pe.Node(name='bids_stim_filename', interface=util.Function(function=sss_filename,input_names=inspect.getargspec(sss_filename)[0], output_names=['filename']))
bids_stim_filename = pe.Node(name='bids_stim_filename', interface=util.Function(function=bids_naming,input_names=inspect.getargspec(bids_naming)[0], output_names=['filename']))
bids_stim_filename.inputs.suffix = "events"
bids_stim_filename.inputs.extension = ".tsv"
bids_stim_filename.inputs.metadata = data_selection
events_file = pe.Node(name='events_file', interface=util.Function(function=write_events_file,input_names=inspect.getargspec(write_events_file)[0], output_names=['out_file']))
events_file.inputs.dummy_scans_ms = DUMMY_SCANS * tr * 1000
events_file.inputs.stim_protocol_dictionary = STIM_PROTOCOL_DICTIONARY
events_file.inputs.very_nasty_bruker_delay_hack = very_nasty_bruker_delay_hack
if not (strict or verbose):
events_file.inputs.ignore_exception = True
datasink = pe.Node(nio.DataSink(), name='datasink')
datasink.inputs.base_directory = path.join(measurements_base,"preprocessing",workflow_name)
datasink.inputs.parameterization = False
if not (strict or verbose):
datasink.inputs.ignore_exception = True
workflow_connections = [
(infosource, get_f_scan, [('subject_session', 'selector')]),
(infosource, bids_stim_filename, [('subject_session', 'subject_session')]),
(get_f_scan, bids_stim_filename, [('scan_type', 'scan_type')]),
(get_f_scan, f_bru2nii, [('scan_path', 'input_dir')]),
(f_bru2nii, dummy_scans, [('nii_file', 'in_file')]),
(get_f_scan, dummy_scans, [('scan_path', 'scan_dir')]),
(get_f_scan, events_file, [
('trial', 'trial'),
('scan_path', 'scan_dir')
]),
(events_file, datasink, [('out_file', 'func.@events')]),
(bids_stim_filename, events_file, [('filename', 'out_file')]),
(infosource, datasink, [(('subject_session',ss_to_path), 'container')]),
(infosource, bids_filename, [('subject_session', 'subject_session')]),
(get_f_scan, bids_filename, [('scan_type', 'scan_type')]),
(bids_filename, bandpass, [('filename', 'out_file')]),
(bandpass, datasink, [('out_file', 'func')]),
]
if realign == "space":
realigner = pe.Node(interface=spm.Realign(), name="realigner")
realigner.inputs.register_to_mean = True
workflow_connections.extend([
(dummy_scans, realigner, [('out_file', 'in_file')]),
])
elif realign == "spacetime":
realigner = pe.Node(interface=nipy.SpaceTimeRealigner(), name="realigner")
realigner.inputs.slice_times = "asc_alt_2"
realigner.inputs.tr = tr
realigner.inputs.slice_info = 3 #3 for coronal slices (2 for horizontal, 1 for sagittal)
workflow_connections.extend([
(dummy_scans, realigner, [('out_file', 'in_file')]),
])
elif realign == "time":
realigner = pe.Node(interface=fsl.SliceTimer(), name="slicetimer")
realigner.inputs.time_repetition = tr
workflow_connections.extend([
(dummy_scans, realigner, [('out_file', 'in_file')]),
])
#ADDING SELECTABLE NODES AND EXTENDING WORKFLOW AS APPROPRIATE:
if actual_size:
s_biascorrect, f_biascorrect = real_size_nodes()
else:
s_biascorrect, f_biascorrect = inflated_size_nodes()
if structural_scan_types.any():
get_s_scan = pe.Node(name='get_s_scan', interface=util.Function(function=get_scan, input_names=inspect.getargspec(get_scan)[0], output_names=['scan_path','scan_type','trial']))
if not strict:
get_s_scan.inputs.ignore_exception = True
get_s_scan.inputs.data_selection = data_selection
get_s_scan.inputs.measurements_base = measurements_base
get_s_scan.iterables = ("scan_type", structural_scan_types)
s_bru2nii = pe.Node(interface=bru2nii.Bru2(), name="s_bru2nii")
s_bru2nii.inputs.force_conversion=True
s_bru2nii.inputs.actual_size=actual_size
#s_bids_filename = pe.Node(name='s_bids_filename', interface=util.Function(function=sss_filename,input_names=inspect.getargspec(sss_filename)[0], output_names=['filename']))
s_bids_filename = pe.Node(name='s_bids_filename', interface=util.Function(function=bids_naming,input_names=inspect.getargspec(bids_naming)[0], output_names=['filename']))
s_bids_filename.inputs.metadata = data_selection
if actual_size:
s_register, s_warp, _, _ = DSURQEc_structural_registration(template, registration_mask)
#TODO: incl. in func registration
if autorotate:
workflow_connections.extend([
(s_biascorrect, s_rotated, [('output_image', 'out_file')]),
(s_rotated, s_register, [('out_file', 'moving_image')]),
])
else:
workflow_connections.extend([
(s_biascorrect, s_register, [('output_image', 'moving_image')]),
(s_register, s_warp, [('composite_transform', 'transforms')]),
(s_bru2nii, s_warp, [('nii_file', 'input_image')]),
(s_warp, datasink, [('output_image', 'anat')]),
])
else:
s_reg_biascorrect = pe.Node(interface=ants.N4BiasFieldCorrection(), name="s_reg_biascorrect")
s_reg_biascorrect.inputs.dimension = 3
s_reg_biascorrect.inputs.bspline_fitting_distance = 95
s_reg_biascorrect.inputs.shrink_factor = 2
s_reg_biascorrect.inputs.n_iterations = [500,500,500,500]
s_reg_biascorrect.inputs.convergence_threshold = 1e-14
s_cutoff = pe.Node(interface=fsl.ImageMaths(), name="s_cutoff")
s_cutoff.inputs.op_string = "-thrP 20 -uthrp 98"
s_BET = pe.Node(interface=fsl.BET(), name="s_BET")
s_BET.inputs.mask = True
s_BET.inputs.frac = 0.3
s_BET.inputs.robust = True
s_mask = pe.Node(interface=fsl.ApplyMask(), name="s_mask")
s_register, s_warp, f_warp = structural_registration(template)
workflow_connections.extend([
(s_bru2nii, s_reg_biascorrect, [('nii_file', 'input_image')]),
(s_reg_biascorrect, s_cutoff, [('output_image', 'in_file')]),
(s_cutoff, s_BET, [('out_file', 'in_file')]),
(s_biascorrect, s_mask, [('output_image', 'in_file')]),
(s_BET, s_mask, [('mask_file', 'mask_file')]),
])
#TODO: incl. in func registration
if autorotate:
workflow_connections.extend([
(s_mask, s_rotated, [('out_file', 'out_file')]),
(s_rotated, s_register, [('out_file', 'moving_image')]),
])
else:
workflow_connections.extend([
(s_mask, s_register, [('out_file', 'moving_image')]),
(s_register, s_warp, [('composite_transform', 'transforms')]),
(s_bru2nii, s_warp, [('nii_file', 'input_image')]),
(s_warp, datasink, [('output_image', 'anat')]),
])
if autorotate:
s_rotated = autorotate(template)
workflow_connections.extend([
(infosource, get_s_scan, [('subject_session', 'selector')]),
(infosource, s_bids_filename, [('subject_session', 'subject_session')]),
(get_s_scan, s_bru2nii, [('scan_path','input_dir')]),
(get_s_scan, s_bids_filename, [('scan_type', 'scan_type')]),
(s_bids_filename, s_warp, [('filename','output_image')]),
(s_bru2nii, s_biascorrect, [('nii_file', 'input_image')]),
])
if functional_registration_method == "structural":
		if not structural_scan_types.any():
raise ValueError('The option `registration="structural"` requires there to be a structural scan type.')
workflow_connections.extend([
(s_register, f_warp, [('composite_transform', 'transforms')]),
])
if realign == "space":
workflow_connections.extend([
(realigner, f_warp, [('realigned_files', 'input_image')]),
])
elif realign == "spacetime":
workflow_connections.extend([
(realigner, f_warp, [('out_file', 'input_image')]),
])
elif realign == "time":
workflow_connections.extend([
(realigner, f_warp, [('slice_time_corrected_file', 'input_image')]),
])
else:
workflow_connections.extend([
(dummy_scans, f_warp, [('out_file', 'input_image')]),
])
if functional_registration_method == "composite":
if not structural_scan_types.any():
raise ValueError('The option `registration="composite"` requires there to be a structural scan type.')
_, _, f_register, f_warp = DSURQEc_structural_registration(template, registration_mask)
temporal_mean = pe.Node(interface=fsl.MeanImage(), name="temporal_mean")
merge = pe.Node(util.Merge(2), name='merge')
workflow_connections.extend([
(temporal_mean, f_biascorrect, [('out_file', 'input_image')]),
(f_biascorrect, f_register, [('output_image', 'moving_image')]),
(s_biascorrect, f_register, [('output_image', 'fixed_image')]),
(f_register, merge, [('composite_transform', 'in1')]),
(s_register, merge, [('composite_transform', 'in2')]),
(merge, f_warp, [('out', 'transforms')]),
])
if realign == "space":
workflow_connections.extend([
(realigner, temporal_mean, [('realigned_files', 'in_file')]),
(realigner, f_warp, [('realigned_files', 'input_image')]),
])
elif realign == "spacetime":
workflow_connections.extend([
(realigner, temporal_mean, [('out_file', 'in_file')]),
(realigner, f_warp, [('out_file', 'input_image')]),
])
elif realign == "time":
workflow_connections.extend([
(realigner, temporal_mean, [('slice_time_corrected_file', 'in_file')]),
(realigner, f_warp, [('slice_time_corrected_file', 'input_image')]),
])
else:
workflow_connections.extend([
(dummy_scans, temporal_mean, [('out_file', 'in_file')]),
(dummy_scans, f_warp, [('out_file', 'input_image')]),
])
elif functional_registration_method == "functional":
f_register, f_warp = functional_registration(template)
temporal_mean = pe.Node(interface=fsl.MeanImage(), name="temporal_mean")
#f_cutoff = pe.Node(interface=fsl.ImageMaths(), name="f_cutoff")
#f_cutoff.inputs.op_string = "-thrP 30"
#f_BET = pe.Node(interface=fsl.BET(), name="f_BET")
#f_BET.inputs.mask = True
#f_BET.inputs.frac = 0.5
workflow_connections.extend([
(temporal_mean, f_biascorrect, [('out_file', 'input_image')]),
#(f_biascorrect, f_cutoff, [('output_image', 'in_file')]),
#(f_cutoff, f_BET, [('out_file', 'in_file')]),
#(f_BET, f_register, [('out_file', 'moving_image')]),
(f_biascorrect, f_register, [('output_image', 'moving_image')]),
(f_register, f_warp, [('composite_transform', 'transforms')]),
])
if realign == "space":
workflow_connections.extend([
(realigner, temporal_mean, [('realigned_files', 'in_file')]),
(realigner, f_warp, [('realigned_files', 'input_image')]),
])
elif realign == "spacetime":
workflow_connections.extend([
(realigner, temporal_mean, [('out_file', 'in_file')]),
(realigner, f_warp, [('out_file', 'input_image')]),
])
elif realign == "time":
workflow_connections.extend([
(realigner, temporal_mean, [('slice_time_corrected_file', 'in_file')]),
(realigner, f_warp, [('slice_time_corrected_file', 'input_image')]),
])
else:
workflow_connections.extend([
(dummy_scans, temporal_mean, [('out_file', 'in_file')]),
(dummy_scans, f_warp, [('out_file', 'input_image')]),
])
invert = pe.Node(interface=fsl.ImageMaths(), name="invert")
if functional_blur_xy and negative_contrast_agent:
blur = pe.Node(interface=afni.preprocess.BlurToFWHM(), name="blur")
blur.inputs.fwhmxy = functional_blur_xy
workflow_connections.extend([
(f_warp, blur, [('output_image', 'in_file')]),
(blur, invert, [(('out_file', fslmaths_invert_values), 'op_string')]),
(blur, invert, [('out_file', 'in_file')]),
(invert, bandpass, [('out_file', 'in_file')]),
])
elif functional_blur_xy:
blur = pe.Node(interface=afni.preprocess.BlurToFWHM(), name="blur")
blur.inputs.fwhmxy = functional_blur_xy
workflow_connections.extend([
(f_warp, blur, [('output_image', 'in_file')]),
(blur, bandpass, [('out_file', 'in_file')]),
])
elif negative_contrast_agent:
workflow_connections.extend([
(f_warp, invert, [(('output_image', fslmaths_invert_values), 'op_string')]),
(f_warp, invert, [('output_image', 'in_file')]),
(invert, bandpass, [('out_file', 'in_file')]),
])
else:
workflow_connections.extend([
(f_warp, bandpass, [('output_image', 'in_file')]),
])
workflow_config = {'execution': {'crashdump_dir': path.join(measurements_base,'preprocessing/crashdump'),}}
if debug:
workflow_config['logging'] = {
'workflow_level':'DEBUG',
'utils_level':'DEBUG',
'interface_level':'DEBUG',
'filemanip_level':'DEBUG',
'log_to_file':'true',
}
workdir_name = workflow_name+"_work"
workflow = pe.Workflow(name=workdir_name)
workflow.connect(workflow_connections)
workflow.base_dir = path.join(measurements_base,"preprocessing")
workflow.config = workflow_config
workflow.write_graph(dotfilename=path.join(workflow.base_dir,workdir_name,"graph.dot"), graph2use="hierarchical", format="png")
workflow.run(plugin="MultiProc", plugin_args={'n_procs' : n_procs})
if not keep_work:
shutil.rmtree(path.join(workflow.base_dir,workdir_name))
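if __name__ == '__main__':
    # Minimal invocation sketch (added for illustration only): the study path
    # and the match dictionaries below are hypothetical placeholders and would
    # have to point at a real Bruker measurement folder for the workflow to run.
    bruker("~/ni_data/my_study",
        template="mouse",
        functional_match={"scan_type": ["EPI_CBV"]},
        structural_match={"scan_type": ["TurboRARE"]},
        functional_registration_method="composite",
        workflow_name="composite_example",
        )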
| gpl-3.0 |
kagayakidan/scikit-learn | sklearn/metrics/cluster/tests/test_supervised.py | 206 | 7643 | import numpy as np
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import entropy
from sklearn.utils.testing import assert_raise_message
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from numpy.testing import assert_array_almost_equal
score_funcs = [
adjusted_rand_score,
homogeneity_score,
completeness_score,
v_measure_score,
adjusted_mutual_info_score,
normalized_mutual_info_score,
]
def test_error_messages_on_wrong_input():
for score_func in score_funcs:
expected = ('labels_true and labels_pred must have same size,'
' got 2 and 3')
assert_raise_message(ValueError, expected, score_func,
[0, 1], [1, 1, 1])
expected = "labels_true must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[[0, 1], [1, 0]], [1, 1, 1])
expected = "labels_pred must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[0, 1, 0], [[1, 1], [0, 0]])
def test_perfect_matches():
for score_func in score_funcs:
assert_equal(score_func([], []), 1.0)
assert_equal(score_func([0], [1]), 1.0)
assert_equal(score_func([0, 0, 0], [0, 0, 0]), 1.0)
assert_equal(score_func([0, 1, 0], [42, 7, 42]), 1.0)
assert_equal(score_func([0., 1., 0.], [42., 7., 42.]), 1.0)
assert_equal(score_func([0., 1., 2.], [42., 7., 2.]), 1.0)
assert_equal(score_func([0, 1, 2], [42, 7, 2]), 1.0)
def test_homogeneous_but_not_complete_labeling():
# homogeneous but not complete clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 2, 2])
assert_almost_equal(h, 1.00, 2)
assert_almost_equal(c, 0.69, 2)
assert_almost_equal(v, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
# complete but not homogeneous clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 1, 1])
assert_almost_equal(h, 0.58, 2)
assert_almost_equal(c, 1.00, 2)
assert_almost_equal(v, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
# neither complete nor homogeneous but not so bad either
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
def test_non_consicutive_labels():
# regression tests for labels with gaps
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 2, 2, 2],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 4, 0, 4, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
assert_almost_equal(ari_1, 0.24, 2)
assert_almost_equal(ari_2, 0.24, 2)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
seed=42):
# Compute score for random uniform cluster labelings
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(k_range), n_runs))
for i, k in enumerate(k_range):
for j in range(n_runs):
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
def test_adjustment_for_chance():
# Check that adjusted scores are almost zero on random labels
n_clusters_range = [2, 10, 50, 90]
n_samples = 100
n_runs = 10
scores = uniform_labelings_scores(
adjusted_rand_score, n_samples, n_clusters_range, n_runs)
max_abs_scores = np.abs(scores).max(axis=1)
assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
def test_adjusted_mutual_info_score():
# Compute the Adjusted Mutual Information and test against known values
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
# Mutual information
mi = mutual_info_score(labels_a, labels_b)
assert_almost_equal(mi, 0.41022, 5)
# Expected mutual information
C = contingency_matrix(labels_a, labels_b)
n_samples = np.sum(C)
emi = expected_mutual_information(C, n_samples)
assert_almost_equal(emi, 0.15042, 5)
# Adjusted mutual information
ami = adjusted_mutual_info_score(labels_a, labels_b)
assert_almost_equal(ami, 0.27502, 5)
ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
assert_equal(ami, 1.0)
# Test with a very large array
a110 = np.array([list(labels_a) * 110]).flatten()
b110 = np.array([list(labels_b) * 110]).flatten()
ami = adjusted_mutual_info_score(a110, b110)
# This is not accurate to more than 2 places
assert_almost_equal(ami, 0.37, 2)
def test_entropy():
ent = entropy([0, 0, 42.])
assert_almost_equal(ent, 0.6365141, 5)
assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
C = contingency_matrix(labels_a, labels_b)
C2 = np.histogram2d(labels_a, labels_b,
bins=(np.arange(1, 5),
np.arange(1, 5)))[0]
assert_array_almost_equal(C, C2)
C = contingency_matrix(labels_a, labels_b, eps=.1)
assert_array_almost_equal(C, C2 + .1)
def test_exactly_zero_info_score():
# Check numerical stability when information is exactly zero
for i in np.logspace(1, 4, 4).astype(np.int):
labels_a, labels_b = np.ones(i, dtype=np.int),\
np.arange(i, dtype=np.int)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(v_measure_score(labels_a, labels_b), 0.0)
assert_equal(adjusted_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
def test_v_measure_and_mutual_information(seed=36):
# Check relation between v_measure, entropy and mutual information
for i in np.logspace(1, 4, 4).astype(np.int):
random_state = np.random.RandomState(seed)
labels_a, labels_b = random_state.random_integers(0, 10, i),\
random_state.random_integers(0, 10, i)
assert_almost_equal(v_measure_score(labels_a, labels_b),
2.0 * mutual_info_score(labels_a, labels_b) /
(entropy(labels_a) + entropy(labels_b)), 0)
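# Worked example (added for illustration; not part of the upstream test-suite):
# the identity exercised in test_v_measure_and_mutual_information can be checked
# by hand on a perfectly matching labeling. For a = b = [0, 0, 1, 1],
# mutual_info_score(a, b) == entropy(a) == entropy(b) == ln(2) ~= 0.6931, so
# 2 * MI / (H(a) + H(b)) == 1.0, which is exactly what v_measure_score(a, b)
# returns.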
| bsd-3-clause |
akloster/bokeh | examples/compat/mpl/lc_offsets.py | 34 | 1096 | from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
import numpy as np
from bokeh import mpl
from bokeh.plotting import output_file, show
# Simulate a series of ocean current profiles, successively
# offset by 0.1 m/s so that they form what is sometimes called
# a "waterfall" plot or a "stagger" plot.
nverts = 60
ncurves = 20
offs = (0.1, 0.0)
rs = np.random.RandomState([12345678])
yy = np.linspace(0, 2 * np.pi, nverts)
ym = np.amax(yy)
xx = (0.2 + (ym - yy) / ym) ** 2 * np.cos(yy - 0.4) * 0.5
segs = []
for i in range(ncurves):
xxx = xx + 0.02 * rs.randn(nverts)
curve = list(zip(xxx, yy * 100))
segs.append(curve)
colors = [(1.0, 0.0, 0.0, 1.0), (0.0, 0.5, 0.0, 1.0), (0.0, 0.0, 1.0, 1.0),
(0.0, 0.75, 0.75, 1.0), (0.75, 0.75, 0, 1.0), (0.75, 0, 0.75, 1.0),
(0.0, 0.0, 0.0, 1.0)]
col = LineCollection(segs, linewidth=5, offsets=offs)
ax = plt.axes()
ax.add_collection(col, autolim=True)
col.set_color(colors)
ax.set_title('Successive data offsets')
fig = plt.gcf()
output_file("lc_offsets.html")
show(mpl.to_bokeh())
| bsd-3-clause |
ethz-asl/segmatch | segmappy/setup.py | 1 | 1338 | from setuptools import setup, find_packages
# check some version of tensorflow has been installed
try:
import tensorflow
if not tensorflow.__version__.startswith('1.8'):
print("Warning: You are running tensorflow version {}. segmappy was tested with version 1.8.0 . If you encounter issues, please report them to the segmap developers.".format(tensorflow.__version__))
except ImportError:
print("Error: segmappy requires some version of Tensorflow (with/without GPU).")
raise
# python package setup
setup(
name="segmappy",
version="0.1",
description="Segmap Python Tools",
url="http://github.com/ethz-asl/segmap",
author="Andrei Cramariuc",
author_email="[email protected]",
license="MIT",
packages=find_packages(),
scripts=["bin/ensure_segmappy_is_installed.py",
"bin/segmappy_train_cnn",
"bin/segmappy_train_semantics",
"bin/segmappy_plot_roc_from_matches",
"bin/segmappy_plot_acc_versus_size",
"bin/segmappy_download_datasets"],
package_data = {'segmappy': ['config/*.ini']},
install_requires = [
"scikit-learn>=0.19.1",
"scipy>=0.19.1",
"configparser>=3.5.0",
"future>=0.16.0",
"matplotlib>=2.2.2",
"numpy>=1.14.3",
"pandas>=0.22.0"
],
zip_safe=False,
)
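# Usage sketch (added for illustration; not part of the original file): from the
# directory containing this setup.py, an editable install is typically done with
#
#   pip install -e .
#
# after which the segmappy_* scripts listed above become available on the PATH.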
| bsd-3-clause |
codles/UpDownMethods | UpDownMethods/plot.py | 1 | 1966 | import matplotlib.pyplot as plt
import UpDownMethods as ud
def plot_results(results, midpoints=False, figure=None, estimate=False,
reversals=False, runs=True):
if figure is None:
figure = plt.figure()
figure.clf()
figure.add_subplot(111)
plt.hold(True)
# Plot correct responses
corr = results[results['Responses'] == True]
if len(corr) > 0:
plt.scatter(corr.index+1, corr.Value, s=50, marker='+', c='k')
# Plot incorrect responses
incorr = results[results['Responses'] == False]
if len(incorr) > 0:
plt.scatter(incorr.index+1, incorr.Value, s=50, marker='_', c='k')
# Indicate reversals
if reversals:
reversal = results[results['Reversal'] == True]
if len(reversal) > 0:
plt.scatter(reversal.index+1, reversal.Value, facecolors='none',
edgecolors='k', s=200)
# Track the runs
if runs is not False:
runs = ud.runs(results)
for i in range(len(runs)):
r = runs.iloc[[i]]
start = r["Start"]
end = r["Finish"]
mid = start + (end-start)/2
runY = min(results.Value)-1
plt.errorbar(mid, runY, xerr=(end-start)/2, c='k')
plt.annotate(str(int(i+1)), xy=(mid, runY-0.5), xytext=(mid, runY-0.5))
if estimate is not False:
est = ud.estimate_reversals(results, num=estimate)
if est is not None:
plt.axhline(y=est, ls='--')
plt.text(0, est+0.05, "Estimate = " + str(est), fontsize=12)
if midpoints:
mids = ud.midpoints(results)
for i in range(len(mids)):
plt.scatter(mids['CentreTrial'].values[i],
mids['Midpoint'].values[i], c='r')
if len(results) > 0:
plt.xlim(-0.5, max(results.index) + 2.5)
plt.ylabel('Stimulus Value', fontsize=14)
plt.xlabel('Trial Number', fontsize=14)
return figure
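if __name__ == "__main__":
    # Demonstration sketch (added for illustration): build a tiny staircase
    # result table with the columns plot_results() reads above ('Value',
    # 'Responses', 'Reversal') and plot it. The values are made up, and the
    # sketch assumes a matplotlib version old enough to still provide plt.hold,
    # as used at the top of plot_results().
    import pandas as pd
    demo = pd.DataFrame({
        "Value": [4, 3, 2, 3, 2, 1, 2],
        "Responses": [True, True, False, True, True, False, False],
        "Reversal": [False, False, True, True, False, True, False],
    })
    plot_results(demo, reversals=True, runs=False)
    plt.show()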
| mit |
cybernet14/scikit-learn | sklearn/manifold/tests/test_mds.py | 324 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
| bsd-3-clause |
wanghaven/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/lines.py | 69 | 48233 | """
This module contains the 2D line class, which can be drawn with a
variety of line styles, markers and colors.
"""
# TODO: expose cap and join style attrs
from __future__ import division
import warnings
import numpy as np
from numpy import ma
from matplotlib import verbose
import artist
from artist import Artist
from cbook import iterable, is_string_like, is_numlike, ls_mapper, dedent,\
flatten
from colors import colorConverter
from path import Path
from transforms import Affine2D, Bbox, TransformedPath, IdentityTransform
from matplotlib import rcParams
# special-purpose marker identifiers:
(TICKLEFT, TICKRIGHT, TICKUP, TICKDOWN,
CARETLEFT, CARETRIGHT, CARETUP, CARETDOWN) = range(8)
# COVERAGE NOTE: Never called internally or from examples
def unmasked_index_ranges(mask, compressed = True):
warnings.warn("Import this directly from matplotlib.cbook",
DeprecationWarning)
# Warning added 2008/07/22
from matplotlib.cbook import unmasked_index_ranges as _unmasked_index_ranges
return _unmasked_index_ranges(mask, compressed=compressed)
def segment_hits(cx, cy, x, y, radius):
"""
Determine if any line segments are within radius of a
point. Returns the list of line segments that are within that
radius.
"""
# Process single points specially
if len(x) < 2:
res, = np.nonzero( (cx - x)**2 + (cy - y)**2 <= radius**2 )
return res
# We need to lop the last element off a lot.
xr,yr = x[:-1],y[:-1]
# Only look at line segments whose nearest point to C on the line
# lies within the segment.
dx,dy = x[1:]-xr, y[1:]-yr
Lnorm_sq = dx**2+dy**2 # Possibly want to eliminate Lnorm==0
u = ( (cx-xr)*dx + (cy-yr)*dy )/Lnorm_sq
candidates = (u>=0) & (u<=1)
#if any(candidates): print "candidates",xr[candidates]
# Note that there is a little area near one side of each point
# which will be near neither segment, and another which will
# be near both, depending on the angle of the lines. The
# following radius test eliminates these ambiguities.
point_hits = (cx - x)**2 + (cy - y)**2 <= radius**2
#if any(point_hits): print "points",xr[candidates]
candidates = candidates & ~(point_hits[:-1] | point_hits[1:])
# For those candidates which remain, determine how far they lie away
# from the line.
px,py = xr+u*dx,yr+u*dy
line_hits = (cx-px)**2 + (cy-py)**2 <= radius**2
#if any(line_hits): print "lines",xr[candidates]
line_hits = line_hits & candidates
points, = point_hits.ravel().nonzero()
lines, = line_hits.ravel().nonzero()
#print points,lines
return np.concatenate((points,lines))
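# Worked example (added for illustration; the coordinates are made up): for the
# poly-line x = [0., 10., 20.], y = [0., 0., 10.] and a cursor at
# (cx, cy) = (5., 1.) with a 5 pixel radius, no vertex lies within the radius,
# but the perpendicular foot on the first segment is (5., 0.), one pixel away:
#
#   >>> segment_hits(5., 1., np.array([0., 10., 20.]),
#   ...              np.array([0., 0., 10.]), 5.)
#   array([0])
#
# i.e. only segment 0 is reported as hit.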
class Line2D(Artist):
"""
A line - the line can have both a solid linestyle connecting all
the vertices, and a marker at each vertex. Additionally, the
    drawing of the solid line is influenced by the drawstyle, e.g., one
can create "stepped" lines in various styles.
"""
lineStyles = _lineStyles = { # hidden names deprecated
'-' : '_draw_solid',
'--' : '_draw_dashed',
'-.' : '_draw_dash_dot',
':' : '_draw_dotted',
'None' : '_draw_nothing',
' ' : '_draw_nothing',
'' : '_draw_nothing',
}
_drawStyles_l = {
'default' : '_draw_lines',
'steps-mid' : '_draw_steps_mid',
'steps-pre' : '_draw_steps_pre',
'steps-post' : '_draw_steps_post',
}
_drawStyles_s = {
'steps' : '_draw_steps_pre',
}
drawStyles = {}
drawStyles.update(_drawStyles_l)
drawStyles.update(_drawStyles_s)
markers = _markers = { # hidden names deprecated
'.' : '_draw_point',
',' : '_draw_pixel',
'o' : '_draw_circle',
'v' : '_draw_triangle_down',
'^' : '_draw_triangle_up',
'<' : '_draw_triangle_left',
'>' : '_draw_triangle_right',
'1' : '_draw_tri_down',
'2' : '_draw_tri_up',
'3' : '_draw_tri_left',
'4' : '_draw_tri_right',
's' : '_draw_square',
'p' : '_draw_pentagon',
'*' : '_draw_star',
'h' : '_draw_hexagon1',
'H' : '_draw_hexagon2',
'+' : '_draw_plus',
'x' : '_draw_x',
'D' : '_draw_diamond',
'd' : '_draw_thin_diamond',
'|' : '_draw_vline',
'_' : '_draw_hline',
TICKLEFT : '_draw_tickleft',
TICKRIGHT : '_draw_tickright',
TICKUP : '_draw_tickup',
TICKDOWN : '_draw_tickdown',
CARETLEFT : '_draw_caretleft',
CARETRIGHT : '_draw_caretright',
CARETUP : '_draw_caretup',
CARETDOWN : '_draw_caretdown',
'None' : '_draw_nothing',
' ' : '_draw_nothing',
'' : '_draw_nothing',
}
filled_markers = ('o', '^', 'v', '<', '>',
's', 'd', 'D', 'h', 'H', 'p', '*')
zorder = 2
validCap = ('butt', 'round', 'projecting')
validJoin = ('miter', 'round', 'bevel')
def __str__(self):
if self._label != "":
return "Line2D(%s)"%(self._label)
elif hasattr(self, '_x') and len(self._x) > 3:
return "Line2D((%g,%g),(%g,%g),...,(%g,%g))"\
%(self._x[0],self._y[0],self._x[0],self._y[0],self._x[-1],self._y[-1])
elif hasattr(self, '_x'):
return "Line2D(%s)"\
%(",".join(["(%g,%g)"%(x,y) for x,y in zip(self._x,self._y)]))
else:
return "Line2D()"
def __init__(self, xdata, ydata,
linewidth = None, # all Nones default to rc
linestyle = None,
color = None,
marker = None,
markersize = None,
markeredgewidth = None,
markeredgecolor = None,
markerfacecolor = None,
antialiased = None,
dash_capstyle = None,
solid_capstyle = None,
dash_joinstyle = None,
solid_joinstyle = None,
pickradius = 5,
drawstyle = None,
**kwargs
):
"""
Create a :class:`~matplotlib.lines.Line2D` instance with *x*
and *y* data in sequences *xdata*, *ydata*.
The kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
        See :meth:`set_linestyle` for a description of the line styles,
:meth:`set_marker` for a description of the markers, and
:meth:`set_drawstyle` for a description of the draw styles.
"""
Artist.__init__(self)
#convert sequences to numpy arrays
if not iterable(xdata):
raise RuntimeError('xdata must be a sequence')
if not iterable(ydata):
raise RuntimeError('ydata must be a sequence')
if linewidth is None : linewidth=rcParams['lines.linewidth']
if linestyle is None : linestyle=rcParams['lines.linestyle']
if marker is None : marker=rcParams['lines.marker']
if color is None : color=rcParams['lines.color']
if markersize is None : markersize=rcParams['lines.markersize']
if antialiased is None : antialiased=rcParams['lines.antialiased']
if dash_capstyle is None : dash_capstyle=rcParams['lines.dash_capstyle']
if dash_joinstyle is None : dash_joinstyle=rcParams['lines.dash_joinstyle']
if solid_capstyle is None : solid_capstyle=rcParams['lines.solid_capstyle']
if solid_joinstyle is None : solid_joinstyle=rcParams['lines.solid_joinstyle']
if drawstyle is None : drawstyle='default'
self.set_dash_capstyle(dash_capstyle)
self.set_dash_joinstyle(dash_joinstyle)
self.set_solid_capstyle(solid_capstyle)
self.set_solid_joinstyle(solid_joinstyle)
self.set_linestyle(linestyle)
self.set_drawstyle(drawstyle)
self.set_linewidth(linewidth)
self.set_color(color)
self.set_marker(marker)
self.set_antialiased(antialiased)
self.set_markersize(markersize)
self._dashSeq = None
self.set_markerfacecolor(markerfacecolor)
self.set_markeredgecolor(markeredgecolor)
self.set_markeredgewidth(markeredgewidth)
self._point_size_reduction = 0.5
self.verticalOffset = None
# update kwargs before updating data to give the caller a
# chance to init axes (and hence unit support)
self.update(kwargs)
self.pickradius = pickradius
if is_numlike(self._picker):
self.pickradius = self._picker
self._xorig = np.asarray([])
self._yorig = np.asarray([])
self._invalid = True
self.set_data(xdata, ydata)
def contains(self, mouseevent):
"""
Test whether the mouse event occurred on the line. The pick
radius determines the precision of the location test (usually
within five points of the value). Use
:meth:`~matplotlib.lines.Line2D.get_pickradius` or
:meth:`~matplotlib.lines.Line2D.set_pickradius` to view or
modify it.
Returns *True* if any values are within the radius along with
``{'ind': pointlist}``, where *pointlist* is the set of points
within the radius.
TODO: sort returned indices by distance
"""
if callable(self._contains): return self._contains(self,mouseevent)
if not is_numlike(self.pickradius):
raise ValueError,"pick radius should be a distance"
# Make sure we have data to plot
if self._invalid:
self.recache()
if len(self._xy)==0: return False,{}
# Convert points to pixels
path, affine = self._transformed_path.get_transformed_path_and_affine()
path = affine.transform_path(path)
xy = path.vertices
xt = xy[:, 0]
yt = xy[:, 1]
# Convert pick radius from points to pixels
        if self.figure is None:
            warnings.warn('no figure set when checking if mouse is on line')
pixels = self.pickradius
else:
pixels = self.figure.dpi/72. * self.pickradius
# Check for collision
if self._linestyle in ['None',None]:
# If no line, return the nearby point(s)
d = (xt-mouseevent.x)**2 + (yt-mouseevent.y)**2
ind, = np.nonzero(np.less_equal(d, pixels**2))
else:
# If line, return the nearby segment(s)
ind = segment_hits(mouseevent.x,mouseevent.y,xt,yt,pixels)
# Debugging message
if False and self._label != u'':
print "Checking line",self._label,"at",mouseevent.x,mouseevent.y
print 'xt', xt
print 'yt', yt
#print 'dx,dy', (xt-mouseevent.x)**2., (yt-mouseevent.y)**2.
print 'ind',ind
# Return the point(s) within radius
return len(ind)>0,dict(ind=ind)
def get_pickradius(self):
'return the pick radius used for containment tests'
return self.pickradius
def setpickradius(self,d):
"""Sets the pick radius used for containment tests
ACCEPTS: float distance in points
"""
self.pickradius = d
def set_picker(self,p):
"""Sets the event picker details for the line.
ACCEPTS: float distance in points or callable pick function
``fn(artist, event)``
"""
if callable(p):
self._contains = p
else:
self.pickradius = p
self._picker = p
def get_window_extent(self, renderer):
bbox = Bbox.unit()
bbox.update_from_data_xy(self.get_transform().transform(self.get_xydata()),
ignore=True)
# correct for marker size, if any
if self._marker is not None:
ms = (self._markersize / 72.0 * self.figure.dpi) * 0.5
bbox = bbox.padded(ms)
return bbox
def set_axes(self, ax):
Artist.set_axes(self, ax)
if ax.xaxis is not None:
self._xcid = ax.xaxis.callbacks.connect('units', self.recache)
if ax.yaxis is not None:
self._ycid = ax.yaxis.callbacks.connect('units', self.recache)
set_axes.__doc__ = Artist.set_axes.__doc__
def set_data(self, *args):
"""
Set the x and y data
ACCEPTS: 2D array
"""
if len(args)==1:
x, y = args[0]
else:
x, y = args
not_masked = 0
if not ma.isMaskedArray(x):
x = np.asarray(x)
not_masked += 1
if not ma.isMaskedArray(y):
y = np.asarray(y)
not_masked += 1
if (not_masked < 2 or
(x is not self._xorig and
(x.shape != self._xorig.shape or np.any(x != self._xorig))) or
(y is not self._yorig and
(y.shape != self._yorig.shape or np.any(y != self._yorig)))):
self._xorig = x
self._yorig = y
self._invalid = True
def recache(self):
#if self.axes is None: print 'recache no axes'
#else: print 'recache units', self.axes.xaxis.units, self.axes.yaxis.units
if ma.isMaskedArray(self._xorig) or ma.isMaskedArray(self._yorig):
x = ma.asarray(self.convert_xunits(self._xorig), float)
y = ma.asarray(self.convert_yunits(self._yorig), float)
x = ma.ravel(x)
y = ma.ravel(y)
else:
x = np.asarray(self.convert_xunits(self._xorig), float)
y = np.asarray(self.convert_yunits(self._yorig), float)
x = np.ravel(x)
y = np.ravel(y)
if len(x)==1 and len(y)>1:
x = x * np.ones(y.shape, float)
if len(y)==1 and len(x)>1:
y = y * np.ones(x.shape, float)
if len(x) != len(y):
raise RuntimeError('xdata and ydata must be the same length')
x = x.reshape((len(x), 1))
y = y.reshape((len(y), 1))
if ma.isMaskedArray(x) or ma.isMaskedArray(y):
self._xy = ma.concatenate((x, y), 1)
else:
self._xy = np.concatenate((x, y), 1)
self._x = self._xy[:, 0] # just a view
self._y = self._xy[:, 1] # just a view
# Masked arrays are now handled by the Path class itself
self._path = Path(self._xy)
self._transformed_path = TransformedPath(self._path, self.get_transform())
self._invalid = False
def set_transform(self, t):
"""
set the Transformation instance used by this artist
ACCEPTS: a :class:`matplotlib.transforms.Transform` instance
"""
Artist.set_transform(self, t)
self._invalid = True
# self._transformed_path = TransformedPath(self._path, self.get_transform())
def _is_sorted(self, x):
"return true if x is sorted"
if len(x)<2: return 1
return np.alltrue(x[1:]-x[0:-1]>=0)
def draw(self, renderer):
if self._invalid:
self.recache()
renderer.open_group('line2d')
if not self._visible: return
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_foreground(self._color)
gc.set_antialiased(self._antialiased)
gc.set_linewidth(self._linewidth)
gc.set_alpha(self._alpha)
if self.is_dashed():
cap = self._dashcapstyle
join = self._dashjoinstyle
else:
cap = self._solidcapstyle
join = self._solidjoinstyle
gc.set_joinstyle(join)
gc.set_capstyle(cap)
gc.set_snap(self.get_snap())
funcname = self._lineStyles.get(self._linestyle, '_draw_nothing')
if funcname != '_draw_nothing':
tpath, affine = self._transformed_path.get_transformed_path_and_affine()
self._lineFunc = getattr(self, funcname)
funcname = self.drawStyles.get(self._drawstyle, '_draw_lines')
drawFunc = getattr(self, funcname)
drawFunc(renderer, gc, tpath, affine.frozen())
if self._marker is not None:
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_foreground(self.get_markeredgecolor())
gc.set_linewidth(self._markeredgewidth)
gc.set_alpha(self._alpha)
funcname = self._markers.get(self._marker, '_draw_nothing')
if funcname != '_draw_nothing':
tpath, affine = self._transformed_path.get_transformed_points_and_affine()
markerFunc = getattr(self, funcname)
markerFunc(renderer, gc, tpath, affine.frozen())
renderer.close_group('line2d')
def get_antialiased(self): return self._antialiased
def get_color(self): return self._color
def get_drawstyle(self): return self._drawstyle
def get_linestyle(self): return self._linestyle
def get_linewidth(self): return self._linewidth
def get_marker(self): return self._marker
def get_markeredgecolor(self):
if (is_string_like(self._markeredgecolor) and
self._markeredgecolor == 'auto'):
if self._marker in self.filled_markers:
return 'k'
else:
return self._color
else:
return self._markeredgecolor
return self._markeredgecolor
def get_markeredgewidth(self): return self._markeredgewidth
def get_markerfacecolor(self):
if (self._markerfacecolor is None or
(is_string_like(self._markerfacecolor) and
self._markerfacecolor.lower()=='none') ):
return self._markerfacecolor
elif (is_string_like(self._markerfacecolor) and
self._markerfacecolor.lower() == 'auto'):
return self._color
else:
return self._markerfacecolor
def get_markersize(self): return self._markersize
def get_data(self, orig=True):
"""
Return the xdata, ydata.
If *orig* is *True*, return the original data
"""
return self.get_xdata(orig=orig), self.get_ydata(orig=orig)
def get_xdata(self, orig=True):
"""
Return the xdata.
If *orig* is *True*, return the original data, else the
processed data.
"""
if orig:
return self._xorig
if self._invalid:
self.recache()
return self._x
def get_ydata(self, orig=True):
"""
Return the ydata.
If *orig* is *True*, return the original data, else the
processed data.
"""
if orig:
return self._yorig
if self._invalid:
self.recache()
return self._y
def get_path(self):
"""
Return the :class:`~matplotlib.path.Path` object associated
with this line.
"""
if self._invalid:
self.recache()
return self._path
def get_xydata(self):
"""
Return the *xy* data as a Nx2 numpy array.
"""
if self._invalid:
self.recache()
return self._xy
def set_antialiased(self, b):
"""
        True if line should be drawn with antialiased rendering
ACCEPTS: [True | False]
"""
self._antialiased = b
def set_color(self, color):
"""
Set the color of the line
ACCEPTS: any matplotlib color
"""
self._color = color
def set_drawstyle(self, drawstyle):
"""
Set the drawstyle of the plot
'default' connects the points with lines. The steps variants
produce step-plots. 'steps' is equivalent to 'steps-pre' and
is maintained for backward-compatibility.
ACCEPTS: [ 'default' | 'steps' | 'steps-pre' | 'steps-mid' | 'steps-post' ]
"""
self._drawstyle = drawstyle
def set_linewidth(self, w):
"""
Set the line width in points
ACCEPTS: float value in points
"""
self._linewidth = w
def set_linestyle(self, linestyle):
"""
Set the linestyle of the line (also accepts drawstyles)
================ =================
linestyle description
================ =================
'-' solid
'--' dashed
'-.' dash_dot
':' dotted
'None' draw nothing
' ' draw nothing
'' draw nothing
================ =================
'steps' is equivalent to 'steps-pre' and is maintained for
backward-compatibility.
.. seealso::
:meth:`set_drawstyle`
ACCEPTS: [ '-' | '--' | '-.' | ':' | 'None' | ' ' | '' ] and
any drawstyle in combination with a linestyle, e.g. 'steps--'.
"""
# handle long drawstyle names before short ones !
for ds in flatten([k.keys() for k in (self._drawStyles_l,
self._drawStyles_s)], is_string_like):
if linestyle.startswith(ds):
self.set_drawstyle(ds)
if len(linestyle) > len(ds):
linestyle = linestyle[len(ds):]
else:
linestyle = '-'
if linestyle not in self._lineStyles:
if linestyle in ls_mapper:
linestyle = ls_mapper[linestyle]
else:
verbose.report('Unrecognized line style %s, %s' %
(linestyle, type(linestyle)))
if linestyle in [' ','']:
linestyle = 'None'
self._linestyle = linestyle
def set_marker(self, marker):
"""
Set the line marker
========== ==========================
marker description
========== ==========================
'.' point
',' pixel
'o' circle
'v' triangle_down
'^' triangle_up
'<' triangle_left
'>' triangle_right
'1' tri_down
'2' tri_up
'3' tri_left
'4' tri_right
's' square
'p' pentagon
'*' star
'h' hexagon1
'H' hexagon2
'+' plus
'x' x
'D' diamond
'd' thin_diamond
'|' vline
'_' hline
TICKLEFT tickleft
TICKRIGHT tickright
TICKUP tickup
TICKDOWN tickdown
CARETLEFT caretleft
CARETRIGHT caretright
CARETUP caretup
CARETDOWN caretdown
'None' nothing
' ' nothing
'' nothing
========== ==========================
ACCEPTS: [ '+' | '*' | ',' | '.' | '1' | '2' | '3' | '4'
| '<' | '>' | 'D' | 'H' | '^' | '_' | 'd'
| 'h' | 'o' | 'p' | 's' | 'v' | 'x' | '|'
| TICKUP | TICKDOWN | TICKLEFT | TICKRIGHT
| 'None' | ' ' | '' ]
"""
if marker not in self._markers:
verbose.report('Unrecognized marker style %s, %s' %
(marker, type(marker)))
if marker in [' ','']:
marker = 'None'
self._marker = marker
self._markerFunc = self._markers[marker]
def set_markeredgecolor(self, ec):
"""
Set the marker edge color
ACCEPTS: any matplotlib color
"""
if ec is None :
ec = 'auto'
self._markeredgecolor = ec
def set_markeredgewidth(self, ew):
"""
Set the marker edge width in points
ACCEPTS: float value in points
"""
if ew is None :
ew = rcParams['lines.markeredgewidth']
self._markeredgewidth = ew
def set_markerfacecolor(self, fc):
"""
Set the marker face color
ACCEPTS: any matplotlib color
"""
if fc is None :
fc = 'auto'
self._markerfacecolor = fc
def set_markersize(self, sz):
"""
Set the marker size in points
ACCEPTS: float
"""
self._markersize = sz
def set_xdata(self, x):
"""
Set the data np.array for x
ACCEPTS: 1D array
"""
x = np.asarray(x)
self.set_data(x, self._yorig)
def set_ydata(self, y):
"""
Set the data np.array for y
ACCEPTS: 1D array
"""
y = np.asarray(y)
self.set_data(self._xorig, y)
def set_dashes(self, seq):
"""
Set the dash sequence, sequence of dashes with on off ink in
points. If seq is empty or if seq = (None, None), the
linestyle will be set to solid.
ACCEPTS: sequence of on/off ink in points
"""
if seq == (None, None) or len(seq)==0:
self.set_linestyle('-')
else:
self.set_linestyle('--')
self._dashSeq = seq # TODO: offset ignored for now
def _draw_lines(self, renderer, gc, path, trans):
self._lineFunc(renderer, gc, path, trans)
def _draw_steps_pre(self, renderer, gc, path, trans):
vertices = self._xy
steps = ma.zeros((2*len(vertices)-1, 2), np.float_)
steps[0::2, 0], steps[1::2, 0] = vertices[:, 0], vertices[:-1, 0]
steps[0::2, 1], steps[1:-1:2, 1] = vertices[:, 1], vertices[1:, 1]
path = Path(steps)
path = path.transformed(self.get_transform())
self._lineFunc(renderer, gc, path, IdentityTransform())
def _draw_steps_post(self, renderer, gc, path, trans):
vertices = self._xy
steps = ma.zeros((2*len(vertices)-1, 2), np.float_)
steps[::2, 0], steps[1:-1:2, 0] = vertices[:, 0], vertices[1:, 0]
steps[0::2, 1], steps[1::2, 1] = vertices[:, 1], vertices[:-1, 1]
path = Path(steps)
path = path.transformed(self.get_transform())
self._lineFunc(renderer, gc, path, IdentityTransform())
def _draw_steps_mid(self, renderer, gc, path, trans):
vertices = self._xy
steps = ma.zeros((2*len(vertices), 2), np.float_)
steps[1:-1:2, 0] = 0.5 * (vertices[:-1, 0] + vertices[1:, 0])
steps[2::2, 0] = 0.5 * (vertices[:-1, 0] + vertices[1:, 0])
steps[0, 0] = vertices[0, 0]
steps[-1, 0] = vertices[-1, 0]
steps[0::2, 1], steps[1::2, 1] = vertices[:, 1], vertices[:, 1]
path = Path(steps)
path = path.transformed(self.get_transform())
self._lineFunc(renderer, gc, path, IdentityTransform())
def _draw_nothing(self, *args, **kwargs):
pass
def _draw_solid(self, renderer, gc, path, trans):
gc.set_linestyle('solid')
renderer.draw_path(gc, path, trans)
def _draw_dashed(self, renderer, gc, path, trans):
gc.set_linestyle('dashed')
if self._dashSeq is not None:
gc.set_dashes(0, self._dashSeq)
renderer.draw_path(gc, path, trans)
def _draw_dash_dot(self, renderer, gc, path, trans):
gc.set_linestyle('dashdot')
renderer.draw_path(gc, path, trans)
def _draw_dotted(self, renderer, gc, path, trans):
gc.set_linestyle('dotted')
renderer.draw_path(gc, path, trans)
def _draw_point(self, renderer, gc, path, path_trans):
w = renderer.points_to_pixels(self._markersize) * \
self._point_size_reduction * 0.5
gc.set_snap(renderer.points_to_pixels(self._markersize) > 3.0)
rgbFace = self._get_rgb_face()
transform = Affine2D().scale(w)
renderer.draw_markers(
gc, Path.unit_circle(), transform, path, path_trans,
rgbFace)
_draw_pixel_transform = Affine2D().translate(-0.5, -0.5)
def _draw_pixel(self, renderer, gc, path, path_trans):
rgbFace = self._get_rgb_face()
gc.set_snap(False)
renderer.draw_markers(gc, Path.unit_rectangle(),
self._draw_pixel_transform,
path, path_trans, rgbFace)
def _draw_circle(self, renderer, gc, path, path_trans):
w = renderer.points_to_pixels(self._markersize) * 0.5
gc.set_snap(renderer.points_to_pixels(self._markersize) > 3.0)
rgbFace = self._get_rgb_face()
transform = Affine2D().scale(w, w)
renderer.draw_markers(
gc, Path.unit_circle(), transform, path, path_trans,
rgbFace)
_triangle_path = Path([[0.0, 1.0], [-1.0, -1.0], [1.0, -1.0], [0.0, 1.0]])
def _draw_triangle_up(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset, offset)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, self._triangle_path, transform,
path, path_trans, rgbFace)
def _draw_triangle_down(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset, -offset)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, self._triangle_path, transform,
path, path_trans, rgbFace)
def _draw_triangle_left(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset, offset).rotate_deg(90)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, self._triangle_path, transform,
path, path_trans, rgbFace)
def _draw_triangle_right(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset, offset).rotate_deg(-90)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, self._triangle_path, transform,
path, path_trans, rgbFace)
def _draw_square(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 2.0)
side = renderer.points_to_pixels(self._markersize)
transform = Affine2D().translate(-0.5, -0.5).scale(side)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_rectangle(), transform,
path, path_trans, rgbFace)
def _draw_diamond(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
side = renderer.points_to_pixels(self._markersize)
transform = Affine2D().translate(-0.5, -0.5).rotate_deg(45).scale(side)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_rectangle(), transform,
path, path_trans, rgbFace)
def _draw_thin_diamond(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = renderer.points_to_pixels(self._markersize)
transform = Affine2D().translate(-0.5, -0.5) \
.rotate_deg(45).scale(offset * 0.6, offset)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_rectangle(), transform,
path, path_trans, rgbFace)
def _draw_pentagon(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5 * renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_regular_polygon(5), transform,
path, path_trans, rgbFace)
def _draw_star(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5 * renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
rgbFace = self._get_rgb_face()
_starpath = Path.unit_regular_star(5, innerCircle=0.381966)
renderer.draw_markers(gc, _starpath, transform,
path, path_trans, rgbFace)
def _draw_hexagon1(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5 * renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_regular_polygon(6), transform,
path, path_trans, rgbFace)
def _draw_hexagon2(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5 * renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(30)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_regular_polygon(6), transform,
path, path_trans, rgbFace)
_line_marker_path = Path([[0.0, -1.0], [0.0, 1.0]])
def _draw_vline(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
renderer.draw_markers(gc, self._line_marker_path, transform,
path, path_trans)
def _draw_hline(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(90)
renderer.draw_markers(gc, self._line_marker_path, transform,
path, path_trans)
_tickhoriz_path = Path([[0.0, 0.0], [1.0, 0.0]])
def _draw_tickleft(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = renderer.points_to_pixels(self._markersize)
marker_transform = Affine2D().scale(-offset, 1.0)
renderer.draw_markers(gc, self._tickhoriz_path, marker_transform,
path, path_trans)
def _draw_tickright(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = renderer.points_to_pixels(self._markersize)
marker_transform = Affine2D().scale(offset, 1.0)
renderer.draw_markers(gc, self._tickhoriz_path, marker_transform,
path, path_trans)
_tickvert_path = Path([[-0.0, 0.0], [-0.0, 1.0]])
def _draw_tickup(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = renderer.points_to_pixels(self._markersize)
marker_transform = Affine2D().scale(1.0, offset)
renderer.draw_markers(gc, self._tickvert_path, marker_transform,
path, path_trans)
def _draw_tickdown(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = renderer.points_to_pixels(self._markersize)
marker_transform = Affine2D().scale(1.0, -offset)
renderer.draw_markers(gc, self._tickvert_path, marker_transform,
path, path_trans)
_plus_path = Path([[-1.0, 0.0], [1.0, 0.0],
[0.0, -1.0], [0.0, 1.0]],
[Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO])
def _draw_plus(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
renderer.draw_markers(gc, self._plus_path, transform,
path, path_trans)
_tri_path = Path([[0.0, 0.0], [0.0, -1.0],
[0.0, 0.0], [0.8, 0.5],
[0.0, 0.0], [-0.8, 0.5]],
[Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO])
def _draw_tri_down(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
renderer.draw_markers(gc, self._tri_path, transform,
path, path_trans)
def _draw_tri_up(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(180)
renderer.draw_markers(gc, self._tri_path, transform,
path, path_trans)
def _draw_tri_left(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(90)
renderer.draw_markers(gc, self._tri_path, transform,
path, path_trans)
def _draw_tri_right(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(270)
renderer.draw_markers(gc, self._tri_path, transform,
path, path_trans)
_caret_path = Path([[-1.0, 1.5], [0.0, 0.0], [1.0, 1.5]])
def _draw_caretdown(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
renderer.draw_markers(gc, self._caret_path, transform,
path, path_trans)
def _draw_caretup(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(180)
renderer.draw_markers(gc, self._caret_path, transform,
path, path_trans)
def _draw_caretleft(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(270)
renderer.draw_markers(gc, self._caret_path, transform,
path, path_trans)
def _draw_caretright(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(90)
renderer.draw_markers(gc, self._caret_path, transform,
path, path_trans)
_x_path = Path([[-1.0, -1.0], [1.0, 1.0],
[-1.0, 1.0], [1.0, -1.0]],
[Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO])
def _draw_x(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
renderer.draw_markers(gc, self._x_path, transform,
path, path_trans)
def update_from(self, other):
'copy properties from other to self'
Artist.update_from(self, other)
self._linestyle = other._linestyle
self._linewidth = other._linewidth
self._color = other._color
self._markersize = other._markersize
self._markerfacecolor = other._markerfacecolor
self._markeredgecolor = other._markeredgecolor
self._markeredgewidth = other._markeredgewidth
self._dashSeq = other._dashSeq
self._dashcapstyle = other._dashcapstyle
self._dashjoinstyle = other._dashjoinstyle
self._solidcapstyle = other._solidcapstyle
self._solidjoinstyle = other._solidjoinstyle
self._marker = other._marker
self._drawstyle = other._drawstyle
def _get_rgb_face(self):
facecolor = self.get_markerfacecolor()
if is_string_like(facecolor) and facecolor.lower()=='none':
rgbFace = None
else:
rgbFace = colorConverter.to_rgb(facecolor)
return rgbFace
# some aliases....
def set_aa(self, val):
'alias for set_antialiased'
self.set_antialiased(val)
def set_c(self, val):
'alias for set_color'
self.set_color(val)
def set_ls(self, val):
'alias for set_linestyle'
self.set_linestyle(val)
def set_lw(self, val):
'alias for set_linewidth'
self.set_linewidth(val)
def set_mec(self, val):
'alias for set_markeredgecolor'
self.set_markeredgecolor(val)
def set_mew(self, val):
'alias for set_markeredgewidth'
self.set_markeredgewidth(val)
def set_mfc(self, val):
'alias for set_markerfacecolor'
self.set_markerfacecolor(val)
def set_ms(self, val):
'alias for set_markersize'
self.set_markersize(val)
def get_aa(self):
'alias for get_antialiased'
return self.get_antialiased()
def get_c(self):
'alias for get_color'
return self.get_color()
def get_ls(self):
'alias for get_linestyle'
return self.get_linestyle()
def get_lw(self):
'alias for get_linewidth'
return self.get_linewidth()
def get_mec(self):
'alias for get_markeredgecolor'
return self.get_markeredgecolor()
def get_mew(self):
'alias for get_markeredgewidth'
return self.get_markeredgewidth()
def get_mfc(self):
'alias for get_markerfacecolor'
return self.get_markerfacecolor()
def get_ms(self):
'alias for get_markersize'
return self.get_markersize()
def set_dash_joinstyle(self, s):
"""
Set the join style for dashed linestyles
ACCEPTS: ['miter' | 'round' | 'bevel']
"""
s = s.lower()
if s not in self.validJoin:
raise ValueError('set_dash_joinstyle passed "%s";\n' % (s,)
+ 'valid joinstyles are %s' % (self.validJoin,))
self._dashjoinstyle = s
def set_solid_joinstyle(self, s):
"""
Set the join style for solid linestyles
ACCEPTS: ['miter' | 'round' | 'bevel']
"""
s = s.lower()
if s not in self.validJoin:
raise ValueError('set_solid_joinstyle passed "%s";\n' % (s,)
+ 'valid joinstyles are %s' % (self.validJoin,))
self._solidjoinstyle = s
def get_dash_joinstyle(self):
"""
Get the join style for dashed linestyles
"""
return self._dashjoinstyle
def get_solid_joinstyle(self):
"""
Get the join style for solid linestyles
"""
return self._solidjoinstyle
def set_dash_capstyle(self, s):
"""
Set the cap style for dashed linestyles
ACCEPTS: ['butt' | 'round' | 'projecting']
"""
s = s.lower()
if s not in self.validCap:
raise ValueError('set_dash_capstyle passed "%s";\n' % (s,)
+ 'valid capstyles are %s' % (self.validCap,))
self._dashcapstyle = s
def set_solid_capstyle(self, s):
"""
Set the cap style for solid linestyles
ACCEPTS: ['butt' | 'round' | 'projecting']
"""
s = s.lower()
if s not in self.validCap:
raise ValueError('set_solid_capstyle passed "%s";\n' % (s,)
+ 'valid capstyles are %s' % (self.validCap,))
self._solidcapstyle = s
def get_dash_capstyle(self):
"""
Get the cap style for dashed linestyles
"""
return self._dashcapstyle
def get_solid_capstyle(self):
"""
Get the cap style for solid linestyles
"""
return self._solidcapstyle
def is_dashed(self):
'return True if line is dashstyle'
return self._linestyle in ('--', '-.', ':')
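# A minimal usage sketch of the cap/join/linestyle API above (the data
# values are illustrative only, not taken from this module):
#
#     import matplotlib.lines as mlines
#     line = mlines.Line2D([0, 1], [0, 1], linestyle='--', marker='o')
#     line.set_dash_capstyle('round')     # validated against Line2D.validCap
#     line.set_dash_joinstyle('bevel')    # validated against Line2D.validJoin
#     assert line.is_dashed()             # '--', '-.' and ':' count as dashed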
class VertexSelector:
"""
Manage the callbacks to maintain a list of selected vertices for
:class:`matplotlib.lines.Line2D`. Derived classes should override
:meth:`~matplotlib.lines.VertexSelector.process_selected` to do
something with the picks.
Here is an example which highlights the selected verts with red
circles::
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as lines
class HighlightSelected(lines.VertexSelector):
def __init__(self, line, fmt='ro', **kwargs):
lines.VertexSelector.__init__(self, line)
self.markers, = self.axes.plot([], [], fmt, **kwargs)
def process_selected(self, ind, xs, ys):
self.markers.set_data(xs, ys)
self.canvas.draw()
fig = plt.figure()
ax = fig.add_subplot(111)
x, y = np.random.rand(2, 30)
line, = ax.plot(x, y, 'bs-', picker=5)
selector = HighlightSelected(line)
plt.show()
"""
def __init__(self, line):
"""
Initialize the class with a :class:`matplotlib.lines.Line2D`
instance. The line should already be added to some
:class:`matplotlib.axes.Axes` instance and should have the
picker property set.
"""
if not hasattr(line, 'axes'):
raise RuntimeError('You must first add the line to the Axes')
if line.get_picker() is None:
raise RuntimeError('You must first set the picker property of the line')
self.axes = line.axes
self.line = line
self.canvas = self.axes.figure.canvas
self.cid = self.canvas.mpl_connect('pick_event', self.onpick)
self.ind = set()
def process_selected(self, ind, xs, ys):
"""
Default "do nothing" implementation of the
:meth:`process_selected` method.
*ind* are the indices of the selected vertices. *xs* and *ys*
are the coordinates of the selected vertices.
"""
pass
def onpick(self, event):
'When the line is picked, update the set of selected indices.'
if event.artist is not self.line: return
for i in event.ind:
if i in self.ind:
self.ind.remove(i)
else:
self.ind.add(i)
ind = list(self.ind)
ind.sort()
xdata, ydata = self.line.get_data()
self.process_selected(ind, xdata[ind], ydata[ind])
lineStyles = Line2D._lineStyles
lineMarkers = Line2D._markers
drawStyles = Line2D.drawStyles
artist.kwdocd['Line2D'] = artist.kwdoc(Line2D)
# You can not set the docstring of an instancemethod,
# but you can on the underlying function. Go figure.
Line2D.__init__.im_func.__doc__ = dedent(Line2D.__init__.__doc__) % artist.kwdocd
| agpl-3.0 |
sugartom/tensorflow-alien | tensorflow/contrib/learn/python/learn/estimators/estimator.py | 8 | 53916 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import copy
import inspect
import os
import tempfile
import numpy as np
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_args
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn
from tensorflow.contrib.learn.python.learn.estimators import metric_key
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.training.python.training import evaluation
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import device_setter
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import summary_io
from tensorflow.python.util import compat
AS_ITERABLE_DATE = '2016-09-15'
AS_ITERABLE_INSTRUCTIONS = (
'The default behavior of predict() is changing. The default value for\n'
'as_iterable will change to True, and then the flag will be removed\n'
'altogether. The behavior of this flag is described below.')
SCIKIT_DECOUPLE_DATE = '2016-12-01'
SCIKIT_DECOUPLE_INSTRUCTIONS = (
'Estimator is decoupled from Scikit Learn interface by moving into\n'
'separate class SKCompat. Arguments x, y and batch_size are only\n'
'available in the SKCompat class, Estimator will only accept input_fn.\n'
'Example conversion:\n'
' est = Estimator(...) -> est = SKCompat(Estimator(...))')
def _verify_input_args(x, y, input_fn, feed_fn, batch_size):
"""Verifies validity of co-existance of input arguments."""
if input_fn is None:
if x is None:
raise ValueError('Either x or input_fn must be provided.')
if contrib_framework.is_tensor(x) or (y is not None and
contrib_framework.is_tensor(y)):
raise ValueError('Inputs cannot be tensors. Please provide input_fn.')
if feed_fn is not None:
raise ValueError('Can not provide both feed_fn and x or y.')
else:
if (x is not None) or (y is not None):
raise ValueError('Can not provide both input_fn and x or y.')
if batch_size is not None:
raise ValueError('Can not provide both input_fn and batch_size.')
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1):
"""Make inputs into input and feed functions.
Args:
x: Numpy, Pandas or Dask matrix or iterable.
y: Numpy, Pandas or Dask matrix or iterable.
input_fn: Pre-defined input function for training data.
feed_fn: Pre-defined data feeder function.
batch_size: Size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
Data input and feeder function based on training data.
Raises:
ValueError: Only one of `(x & y)` or `input_fn` must be provided.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if input_fn is not None:
return input_fn, feed_fn
df = data_feeder.setup_train_data_feeder(
x,
y,
n_classes=None,
batch_size=batch_size,
shuffle=shuffle,
epochs=epochs)
return df.input_builder, df.get_feed_dict_fn()
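# Illustrative sketch of how in-memory arrays become the (input_fn, feed_fn)
# pair used below; the array shapes are assumptions, not values from this file:
#
#     import numpy as np
#     x = np.random.rand(100, 4).astype(np.float32)
#     y = np.random.randint(0, 2, size=100)
#     input_fn, feed_fn = _get_input_fn(x, y, input_fn=None, feed_fn=None,
#                                       batch_size=32, shuffle=True, epochs=1)
#     features, labels = input_fn()   # placeholder tensors built by the feeder
#     feed_dict = feed_fn()           # one batch of concrete values to feed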
def infer_real_valued_columns_from_input_fn(input_fn):
"""Creates `FeatureColumn` objects for inputs defined by `input_fn`.
This interprets all inputs as dense, fixed-length float values. This creates
a local graph in which it calls `input_fn` to build the tensors, then discards
it.
Args:
input_fn: Input function returning a tuple of:
features - `Tensor` or dictionary of string feature name to `Tensor`.
labels - `Tensor` of label values.
Returns:
List of `FeatureColumn` objects.
"""
with ops.Graph().as_default():
features, _ = input_fn()
return layers.infer_real_valued_columns(features)
def infer_real_valued_columns_from_input(x):
"""Creates `FeatureColumn` objects for inputs defined by input `x`.
This interprets all inputs as dense, fixed-length float values.
Args:
x: Real-valued matrix of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features.
Returns:
List of `FeatureColumn` objects.
"""
input_fn, _ = _get_input_fn(
x=x, y=None, input_fn=None, feed_fn=None, batch_size=None)
return infer_real_valued_columns_from_input_fn(input_fn)
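# Illustrative sketch (the array below is an assumption):
#
#     import numpy as np
#     x = np.random.rand(10, 3).astype(np.float32)
#     feature_columns = infer_real_valued_columns_from_input(x)
#     # feature_columns is a list of real-valued FeatureColumn objects that
#     # together describe the 3 dense float features of `x`.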
def _get_arguments(func):
"""Returns list of arguments this function has."""
if hasattr(func, '__code__'):
# Regular function.
return inspect.getargspec(func).args
elif hasattr(func, '__call__'):
# Callable object.
return _get_arguments(func.__call__)
elif hasattr(func, 'func'):
# Partial function.
return _get_arguments(func.func)
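# Quick illustration with a made-up model_fn:
#
#     def my_model_fn(features, labels, mode, params):
#         pass
#     _get_arguments(my_model_fn)  # -> ['features', 'labels', 'mode', 'params']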
def _get_replica_device_setter(config):
"""Creates a replica device setter if required.
Args:
config: A RunConfig instance.
Returns:
A replica device setter, or None.
"""
ps_ops = [
'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
'MutableHashTableOfTensors', 'MutableDenseHashTable'
]
if config.task_type:
worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return device_setter.replica_device_setter(
ps_tasks=config.num_ps_replicas, worker_device=worker_device,
merge_devices=True, ps_ops=ps_ops, cluster=config.cluster_spec)
else:
return None
def _make_metrics_ops(metrics, features, labels, predictions):
"""Add metrics based on `features`, `labels`, and `predictions`.
`metrics` contains a specification for how to run metrics. It is a dict
mapping friendly names to either `MetricSpec` objects, or directly to a metric
function (assuming that `predictions` and `labels` are single tensors), or to
`(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and
`labels` to `metric` (assuming `labels` is a single tensor).
Users are encouraged to use `MetricSpec` objects, which are more flexible and
cleaner. They also lead to clearer errors.
Args:
metrics: A dict mapping names to metrics specification, for example
`MetricSpec` objects.
features: A dict of tensors returned from an input_fn as features/inputs.
labels: A single tensor or a dict of tensors returned from an input_fn as
labels.
predictions: A single tensor or a dict of tensors output from a model as
predictions.
Returns:
A dict mapping the friendly names given in `metrics` to the results of
calling the given metric functions.
Raises:
ValueError: If metrics specifications do not work with the type of
`features`, `labels`, or `predictions` provided. Most commonly, a dict of
predictions is given but no pred_name is specified.
"""
metrics = metrics or {}
# If labels is a dict with a single key, unpack into a single tensor.
labels_tensor_or_dict = labels
if isinstance(labels, dict) and len(labels) == 1:
labels_tensor_or_dict = labels[list(labels.keys())[0]]
result = {}
# Iterate in lexicographic order, so the graph is identical among runs.
for name, metric in sorted(six.iteritems(metrics)):
if isinstance(metric, metric_spec.MetricSpec):
result[name] = metric.create_metric_ops(features, labels, predictions)
continue
# TODO(b/31229024): Remove the rest of this loop
logging.warning('Please specify metrics using MetricSpec. Using bare '
'functions or (key, fn) tuples is deprecated and support '
'for it will be removed on Oct 1, 2016.')
if isinstance(name, tuple):
# Multi-head metrics.
if len(name) != 2:
raise ValueError('Invalid metric for {}. It returned a tuple with '
'len {}, expected 2.'.format(name, len(name)))
if not isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide (name, prediction), '
'but predictions are not dict. '
'Metrics: %s, Predictions: %s.' % (metrics, predictions))
# Here are two options: labels are single Tensor or a dict.
if isinstance(labels, dict) and name[1] in labels:
# If labels are dict and the prediction name is in it, apply metric.
result[name[0]] = metric(predictions[name[1]], labels[name[1]])
else:
# Otherwise pass the labels to the metric.
result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict)
else:
# Single head metrics.
if isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide only name, no prediction, '
'but predictions are dict. '
'Metrics: %s, Labels: %s.' % (metrics, labels_tensor_or_dict))
result[name] = metric(predictions, labels_tensor_or_dict)
return result
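# Sketch of the preferred MetricSpec form described above; the metric name
# and prediction_key below are illustrative assumptions:
#
#     from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
#     metrics = {
#         'accuracy': MetricSpec(metric_fn=metrics_lib.streaming_accuracy,
#                                prediction_key='classes'),
#     }
#     eval_ops = _make_metrics_ops(metrics, features, labels, predictions)
#     # eval_ops['accuracy'] is the usual (value_op, update_op) pair.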
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
return ', '.join('%s = %s' % (k, v) for k, v in sorted(dictionary.items()))
def _write_dict_to_summary(output_dir,
dictionary,
current_global_step):
"""Writes a `dict` into summary file in given output directory.
Args:
output_dir: `str`, directory to write the summary file in.
dictionary: the `dict` to be written to summary file.
current_global_step: `int`, the current global step.
"""
logging.info('Saving dict for global step %d: %s', current_global_step,
_dict_to_str(dictionary))
summary_writer = summary_io.SummaryWriterCache.get(output_dir)
summary_proto = summary_pb2.Summary()
for key in dictionary:
if dictionary[key] is None:
continue
value = summary_proto.value.add()
value.tag = key
if (isinstance(dictionary[key], np.float32) or
isinstance(dictionary[key], float)):
value.simple_value = float(dictionary[key])
else:
logging.warn('Skipping summary for %s, must be a float or np.float32.',
key)
summary_writer.add_summary(summary_proto, current_global_step)
summary_writer.flush()
class BaseEstimator(
sklearn.BaseEstimator, evaluable.Evaluable, trainable.Trainable):
"""Abstract BaseEstimator class to train and evaluate TensorFlow models.
Users should not instantiate or subclass this class. Instead, use `Estimator`.
"""
__metaclass__ = abc.ABCMeta
# Note that for Google users, this is overridden with
# learn_runner.EstimatorConfig.
# TODO(wicke): Remove this once launcher takes over config functionality
_Config = run_config.RunConfig # pylint: disable=invalid-name
def __init__(self, model_dir=None, config=None):
"""Initializes a BaseEstimator instance.
Args:
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model. If `None`, the model_dir in
`config` will be used if set. If both are set, they must be same.
config: A RunConfig instance.
"""
# Create a run configuration.
if config is None:
self._config = BaseEstimator._Config()
logging.info('Using default config.')
else:
self._config = config
logging.info('Using config: %s', str(vars(self._config)))
# Model directory.
if (model_dir is not None) and (self._config.model_dir is not None):
if model_dir != self._config.model_dir:
# TODO(b/9965722): remove this suppression after it is no longer
# necessary.
# pylint: disable=g-doc-exception
raise ValueError(
"model_dir are set both in constructor and RunConfig, but with "
"different values. In constructor: '{}', in RunConfig: "
"'{}' ".format(model_dir, self._config.model_dir))
self._model_dir = model_dir or self._config.model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.warning('Using temporary folder as model directory: %s',
self._model_dir)
# Set device function depending if there are replicas or not.
self._device_fn = _get_replica_device_setter(self._config)
# Features and labels TensorSignature objects.
# TODO(wicke): Rename these to something more descriptive
self._features_info = None
self._labels_info = None
self._graph = None
@property
def config(self):
# TODO(wicke): make RunConfig immutable, and then return it without a copy.
return copy.deepcopy(self._config)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Trainable`.
Raises:
ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
ValueError: If both `steps` and `max_steps` are not `None`.
"""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
_verify_input_args(x, y, input_fn, None, batch_size)
if x is not None:
SKCompat(self).fit(x, y, batch_size, steps, max_steps, monitors)
return self
if max_steps is not None:
try:
start_step = load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP)
if max_steps <= start_step:
logging.info('Skipping training since max_steps has already been reached.')
return self
except: # pylint: disable=bare-except
pass
hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
if steps is not None or max_steps is not None:
hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))
loss = self._train_model(input_fn=input_fn, hooks=hooks)
logging.info('Loss for final step: %s.', loss)
return self
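# Typical call pattern; `my_input_fn` is a hypothetical function returning
# (features, labels) tensors:
#
#     est.fit(input_fn=my_input_fn, steps=1000)       # train 1000 more steps
#     est.fit(input_fn=my_input_fn, max_steps=20000)  # train until global step
#                                                     # 20000; no-op if reached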
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def partial_fit(
self, x=None, y=None, input_fn=None, steps=1, batch_size=None,
monitors=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different or the same chunks of the dataset. This can implement
either iterative training or out-of-core/online training.
This is especially useful when the whole dataset is too big to
fit in memory at once, or when the model is taking a long time
to converge and you want to split up training into subparts.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of labels. The training label values
(class labels in classification, real numbers in regression). If set,
`input_fn` must be `None`.
input_fn: Input function. If set, `x`, `y`, and `batch_size` must be
`None`.
steps: Number of steps for which to train model. If `None`, train forever.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`. Must be `None` if `input_fn` is provided.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
Returns:
`self`, for chaining.
Raises:
ValueError: If at least one of `x` and `y` is provided, and `input_fn` is
provided.
"""
logging.warning('The current implementation of partial_fit is not optimized'
' for use in a loop. Consider using fit() instead.')
return self.fit(x=x, y=y, input_fn=input_fn, steps=steps,
batch_size=batch_size, monitors=monitors)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def evaluate(self,
x=None,
y=None,
input_fn=None,
feed_fn=None,
batch_size=None,
steps=None,
metrics=None,
name=None,
checkpoint_path=None,
hooks=None,
log_progress=True):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Evaluable`.
Raises:
ValueError: If at least one of `x` or `y` is provided, and at least one of
`input_fn` or `feed_fn` is provided.
Or if `metrics` is not `None` or `dict`.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if x is not None:
return SKCompat(self).score(x, y, batch_size, steps, metrics)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name,
checkpoint_path=checkpoint_path,
hooks=hooks,
log_progress=log_progress)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('batch_size', None), ('as_iterable', True)
)
def predict(
self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
input_fn: Input function. If set, `x` and `batch_size` must be `None`.
batch_size: Override default batch size. If set, `input_fn` must be
`None`.
outputs: list of `str`, name of the output to predict.
If `None`, returns all.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
A numpy array of predicted classes or regression values if the
constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`
of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of
predictions if as_iterable is True.
Raises:
ValueError: If x and input_fn are both provided or both `None`.
"""
_verify_input_args(x, None, input_fn, None, batch_size)
if x is not None and not as_iterable:
return SKCompat(self).predict(x, batch_size)
input_fn, feed_fn = _get_input_fn(x, None, input_fn, None, batch_size)
return self._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=as_iterable)
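# Sketch of the as_iterable contract documented above; `predict_input_fn`
# and `consume` are hypothetical, and the input must terminate
# (e.g. num_epochs=1):
#
#     for prediction in est.predict(input_fn=predict_input_fn,
#                                   as_iterable=True):
#         consume(prediction)   # one example's predictions per iteration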
def get_variable_value(self, name):
"""Returns value of the variable given by name.
Args:
name: string, name of the tensor.
Returns:
Numpy array - value of the tensor.
"""
return load_variable(self.model_dir, name)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
"""
return [name for name, _ in list_variables(self.model_dir)]
@property
def model_dir(self):
return self._model_dir
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def export(self,
export_dir,
input_fn=export._default_input_fn, # pylint: disable=protected-access
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
prediction_key=None,
default_batch_size=1,
exports_to_keep=None,
checkpoint_path=None):
"""Exports inference graph into given dir.
Args:
export_dir: A string containing a directory to write the exported graph
and checkpoints.
input_fn: If `use_deprecated_input_fn` is true, then a function that given
`Tensor` of `Example` strings, parses it into features that are then
passed to the model. Otherwise, a function that takes no argument and
returns a tuple of (features, labels), where features is a dict of
string key to `Tensor` and labels is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
key into the features dict returned by `input_fn` that corresponds to
the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
use_deprecated_input_fn: Determines the signature format of `input_fn`.
signature_fn: Function that returns a default signature and a named
signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
for features and `Tensor` or `dict` of `Tensor`s for predictions.
prediction_key: The key for a tensor in the `predictions` dict (output
from the `model_fn`) to use as the `predictions` input to the
`signature_fn`. Optional. If `None`, predictions will pass to
`signature_fn` without filtering.
default_batch_size: Default batch size of the `Example` placeholder.
exports_to_keep: Number of exports to keep.
checkpoint_path: the checkpoint path of the model to be exported. If it is
`None` (which is default), will use the latest checkpoint in
export_dir.
Returns:
The string path to the exported directory. NB: this functionality was
added ca. 2016/09/25; clients that depend on the return value may need
to handle the case where this function returns None because subclasses
are not returning a value.
"""
# pylint: disable=protected-access
return export._export_estimator(
estimator=self,
export_dir=export_dir,
signature_fn=signature_fn,
prediction_key=prediction_key,
input_fn=input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep,
checkpoint_path=checkpoint_path)
@abc.abstractproperty
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
@abc.abstractproperty
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
A `ModelFnOps` object.
"""
raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')
@deprecated(
'2016-09-23',
'The signature of the input_fn accepted by export is changing to be '
'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate, '
'which makes this function useless. This will be removed after the '
'deprecation date.')
def _get_feature_ops_from_example(self, examples_batch):
"""Returns feature parser for given example batch using features info.
This function requires `fit()` has been called.
Args:
examples_batch: batch of tf.Example
Returns:
features: `Tensor` or `dict` of `Tensor` objects.
Raises:
ValueError: If `_features_info` attribute is not available (usually
because `fit()` has not been called).
"""
if self._features_info is None:
raise ValueError('Features information missing, was fit() ever called?')
return tensor_signature.create_example_parser_from_signatures(
self._features_info, examples_batch)
def _check_inputs(self, features, labels):
if self._features_info is not None:
logging.debug('Given features: %s, required signatures: %s.',
str(features), str(self._features_info))
if not tensor_signature.tensors_compatible(features, self._features_info):
raise ValueError('Features are incompatible with given information. '
'Given features: %s, required signatures: %s.' %
(str(features), str(self._features_info)))
else:
self._features_info = tensor_signature.create_signatures(features)
logging.debug('Setting feature info to %s.', str(self._features_info))
if labels is not None:
if self._labels_info is not None:
logging.debug('Given labels: %s, required signatures: %s.',
str(labels), str(self._labels_info))
if not tensor_signature.tensors_compatible(labels, self._labels_info):
raise ValueError('Labels are incompatible with given information. '
'Given labels: %s, required signatures: %s.' %
(str(labels), str(self._labels_info)))
else:
self._labels_info = tensor_signature.create_signatures(labels)
logging.debug('Setting labels info to %s', str(self._labels_info))
def _extract_metric_update_ops(self, eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
for name, metric_ops in six.iteritems(eval_dict):
if isinstance(metric_ops, (list, tuple)):
if len(metric_ops) == 2:
value_ops[name] = metric_ops[0]
update_ops.append(metric_ops[1])
else:
logging.warning(
'Ignoring metric {}. It returned a list|tuple with len {}, '
'expected 2'.format(name, len(metric_ops)))
value_ops[name] = metric_ops
else:
value_ops[name] = metric_ops
if update_ops:
update_ops = control_flow_ops.group(*update_ops)
else:
update_ops = None
return update_ops, value_ops
def _evaluate_model(self,
input_fn,
steps,
feed_fn=None,
metrics=None,
name='',
checkpoint_path=None,
hooks=None,
log_progress=True):
# TODO(wicke): Remove this once Model and associated code are gone.
if (hasattr(self._config, 'execution_mode') and
self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):
return None, None
# Check that model has been trained (if nothing has been set explicitly).
if not checkpoint_path:
latest_path = saver.latest_checkpoint(self._model_dir)
if not latest_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
checkpoint_path = latest_path
# Setup output directory.
eval_dir = os.path.join(self._model_dir, 'eval' if not name else
'eval_' + name)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
eval_dict = self._get_eval_ops(features, labels, metrics).eval_metric_ops
update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
# We need to copy the hook array as we modify it, thus [:].
hooks = hooks[:] if hooks else []
if feed_fn:
hooks.append(basic_session_run_hooks.FeedFnHook(feed_fn))
if steps:
hooks.append(
evaluation.StopAfterNEvalsHook(
steps, log_progress=log_progress))
global_step_key = 'global_step'
while global_step_key in eval_dict:
global_step_key = '_' + global_step_key
eval_dict[global_step_key] = global_step
eval_results = evaluation.evaluate_once(
checkpoint_path=checkpoint_path,
master=self._config.evaluation_master,
eval_ops=update_op,
final_ops=eval_dict,
hooks=hooks,
config=config_pb2.ConfigProto(allow_soft_placement=True))
current_global_step = eval_results[global_step_key]
_write_dict_to_summary(eval_dir, eval_results, current_global_step)
return eval_results, current_global_step
def _get_features_from_input_fn(self, input_fn):
result = input_fn()
if isinstance(result, (list, tuple)):
return result[0]
return result
def _infer_model(self,
input_fn,
feed_fn=None,
outputs=None,
as_iterable=True,
iterate_batches=False):
# Check that model has been trained.
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
contrib_framework.create_global_step(g)
features = self._get_features_from_input_fn(input_fn)
infer_ops = self._get_predict_ops(features)
predictions = self._filter_predictions(infer_ops.predictions, outputs)
mon_sess = monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path,
config=config_pb2.ConfigProto(allow_soft_placement=True)))
if not as_iterable:
with mon_sess:
if not mon_sess.should_stop():
return mon_sess.run(predictions, feed_fn() if feed_fn else None)
else:
return self._predict_generator(mon_sess, predictions, feed_fn,
iterate_batches)
def _predict_generator(self, mon_sess, predictions, feed_fn, iterate_batches):
with mon_sess:
while not mon_sess.should_stop():
preds = mon_sess.run(predictions, feed_fn() if feed_fn else None)
if iterate_batches:
yield preds
elif not isinstance(predictions, dict):
for pred in preds:
yield pred
else:
first_tensor = list(preds.values())[0]
if isinstance(first_tensor, sparse_tensor.SparseTensorValue):
batch_length = first_tensor.dense_shape[0]
else:
batch_length = first_tensor.shape[0]
for i in range(batch_length):
yield {key: value[i] for key, value in six.iteritems(preds)}
if self._is_input_constant(feed_fn, mon_sess.graph):
return
def _is_input_constant(self, feed_fn, graph):
# If there are no queue_runners, the input `predictions` is a
# constant, and we should stop after the first epoch. If,
# instead, there are queue_runners, eventually they should throw
# an `OutOfRangeError`.
if graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS):
return False
# data_feeder uses feed_fn to generate `OutOfRangeError`.
if feed_fn is not None:
return False
return True
def _filter_predictions(self, predictions, outputs):
if not outputs:
return predictions
if not isinstance(predictions, dict):
raise ValueError(
'outputs argument is not valid in case of non-dict predictions.')
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions) if key in outputs
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, outputs))
return predictions
def _train_model(self, input_fn, hooks):
all_hooks = []
self._graph = ops.Graph()
with self._graph.as_default() as g, g.device(self._device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_ops = self._get_train_ops(features, labels)
ops.add_to_collection(ops.GraphKeys.LOSSES, model_fn_ops.loss)
all_hooks.extend([
basic_session_run_hooks.NanTensorHook(model_fn_ops.loss),
basic_session_run_hooks.LoggingTensorHook(
{
'loss': model_fn_ops.loss,
'step': global_step
},
every_n_iter=100)
])
all_hooks.extend(hooks)
scaffold = model_fn_ops.scaffold or monitored_session.Scaffold()
if not (scaffold.saver or ops.get_collection(ops.GraphKeys.SAVERS)):
ops.add_to_collection(
ops.GraphKeys.SAVERS,
saver.Saver(
sharded=True,
max_to_keep=self._config.keep_checkpoint_max,
defer_build=True))
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
saver_hook_exists = any([
isinstance(h, basic_session_run_hooks.CheckpointSaverHook)
for h in (all_hooks + model_fn_ops.training_hooks + chief_hooks +
model_fn_ops.training_chief_hooks)
])
if not saver_hook_exists:
chief_hooks = [
basic_session_run_hooks.CheckpointSaverHook(
self._model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
]
with monitored_session.MonitoredTrainingSession(
master=self._config.master,
is_chief=self._config.is_chief,
checkpoint_dir=self._model_dir,
scaffold=scaffold,
hooks=all_hooks + model_fn_ops.training_hooks,
chief_only_hooks=chief_hooks + model_fn_ops.training_chief_hooks,
save_checkpoint_secs=0, # Saving is handled by a hook.
save_summaries_steps=self._config.save_summary_steps,
config=config_pb2.ConfigProto(allow_soft_placement=True)
) as mon_sess:
loss = None
while not mon_sess.should_stop():
_, loss = mon_sess.run([model_fn_ops.train_op, model_fn_ops.loss])
summary_io.SummaryWriterCache.clear()
return loss
def _identity_feature_engineering_fn(features, labels):
return features, labels
class Estimator(BaseEstimator):
"""Estimator class is the basic TensorFlow model trainer/evaluator.
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
feature_engineering_fn=None):
"""Constructs an `Estimator` instance.
Args:
model_fn: Model function. Follows the signature:
* Args:
* `features`: single `Tensor` or `dict` of `Tensor`s
(depending on data passed to `fit`),
* `labels`: `Tensor` or `dict` of `Tensor`s (for multi-head
models). If mode is `ModeKeys.INFER`, `labels=None` will be
passed. If the `model_fn`'s signature does not accept
`mode`, the `model_fn` must still be able to handle
`labels=None`.
* `mode`: Optional. Specifies if this is training, evaluation or
prediction. See `ModeKeys`.
* `params`: Optional `dict` of hyperparameters. Will receive what
is passed to Estimator in the `params` parameter. This allows
configuring Estimators from hyper-parameter tuning.
* `config`: Optional configuration object. Will receive what is passed
to Estimator in `config` parameter, or the default `config`.
Allows updating things in your model_fn based on configuration
such as `num_ps_replicas`.
* `model_dir`: Optional directory where model parameters, graph etc
are saved. Will receive what is passed to Estimator in
`model_dir` parameter, or the default `model_dir`. Allows
updating things in your model_fn that expect model_dir, such as
training hooks.
* Returns:
`ModelFnOps`
Also supports a legacy signature which returns tuple of:
* predictions: `Tensor`, `SparseTensor` or dictionary of same.
Can also be any type that is convertible to a `Tensor` or
`SparseTensor`, or dictionary of same.
* loss: Scalar loss `Tensor`.
* train_op: Training update `Tensor` or `Operation`.
Supports the following signatures for the function:
* `(features, labels) -> (predictions, loss, train_op)`
* `(features, labels, mode) -> (predictions, loss, train_op)`
* `(features, labels, mode, params) -> (predictions, loss, train_op)`
* `(features, labels, mode, params, config) ->
(predictions, loss, train_op)`
* `(features, labels, mode, params, config, model_dir) ->
(predictions, loss, train_op)`
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model.
config: Configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into `model_fn`. Please check `model_fn` for
a definition of features and labels.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
"""
super(Estimator, self).__init__(model_dir=model_dir, config=config)
if model_fn is not None:
# Check number of arguments of the given function matches requirements.
model_fn_args = _get_arguments(model_fn)
if params is not None and 'params' not in model_fn_args:
raise ValueError('Estimator\'s model_fn (%s) does not accept a params '
'argument, but non-None params (%s) were passed.' %
(model_fn, params))
if params is None and 'params' in model_fn_args:
logging.warning('Estimator\'s model_fn (%s) includes params '
'argument, but params are not passed to Estimator.',
model_fn)
self._model_fn = model_fn
self.params = params
self._feature_engineering_fn = (
feature_engineering_fn or _identity_feature_engineering_fn)
def _call_model_fn(self, features, labels, mode):
"""Calls model function with support of 2, 3 or 4 arguments.
Args:
features: features dict.
labels: labels dict.
mode: ModeKeys
Returns:
A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a
`ModelFnOps` object.
Raises:
ValueError: if model_fn returns invalid objects.
"""
features, labels = self._feature_engineering_fn(features, labels)
model_fn_args = _get_arguments(self._model_fn)
kwargs = {}
if 'mode' in model_fn_args:
kwargs['mode'] = mode
if 'params' in model_fn_args:
kwargs['params'] = self.params
if 'config' in model_fn_args:
kwargs['config'] = self.config
if 'model_dir' in model_fn_args:
kwargs['model_dir'] = self.model_dir
model_fn_results = self._model_fn(features, labels, **kwargs)
if isinstance(model_fn_results, model_fn_lib.ModelFnOps):
return model_fn_results
# Here model_fn_ops should be a tuple with 3 elements.
if len(model_fn_results) != 3:
raise ValueError('Unrecognized value returned by model_fn, '
'please return ModelFnOps.')
return model_fn_lib.ModelFnOps(
mode=mode,
predictions=model_fn_results[0],
loss=model_fn_results[1],
train_op=model_fn_results[2])
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN)
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
`ModelFnOps` object.
Raises:
ValueError: if `metrics` don't match `labels`.
"""
model_fn_ops = self._call_model_fn(
features, labels, model_fn_lib.ModeKeys.EVAL)
features, labels = self._feature_engineering_fn(features, labels)
# Custom metrics should overwrite defaults.
if metrics:
model_fn_ops.eval_metric_ops.update(_make_metrics_ops(
metrics, features, labels, model_fn_ops.predictions))
if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops:
model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = (
metrics_lib.streaming_mean(model_fn_ops.loss))
return model_fn_ops
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
labels = tensor_signature.create_placeholders_from_signatures(
self._labels_info)
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.INFER)
def export_savedmodel(
self, export_dir_base, serving_input_fn,
default_output_alternative_key=None,
assets_extra=None,
as_text=False,
checkpoint_path=None):
"""Exports inference graph as a SavedModel into given dir.
Args:
export_dir_base: A string containing a directory to write the exported
graph and checkpoints.
serving_input_fn: A function that takes no argument and
returns an `InputFnOps`.
default_output_alternative_key: the name of the head to serve when none is
specified. Not needed for single-headed models.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel. Each key should give the destination
path (including the filename) relative to the assets.extra directory.
The corresponding value gives the full path of the source file to be
copied. For example, the simple case of copying a single file without
renaming it is specified as
`{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If None (the default),
the most recent checkpoint found within the model directory is chosen.
Returns:
The string path to the exported directory.
Raises:
ValueError: if an unrecognized export_type is requested.
"""
if serving_input_fn is None:
raise ValueError('serving_input_fn must be defined.')
with ops.Graph().as_default() as g:
contrib_variables.create_global_step(g)
# Call the serving_input_fn and collect the input alternatives.
input_ops = serving_input_fn()
input_alternatives, features = (
saved_model_export_utils.get_input_alternatives(input_ops))
# Call the model_fn and collect the output alternatives.
model_fn_ops = self._call_model_fn(features, None,
model_fn_lib.ModeKeys.INFER)
output_alternatives, actual_default_output_alternative_key = (
saved_model_export_utils.get_output_alternatives(
model_fn_ops, default_output_alternative_key))
# Build the SignatureDefs from all pairs of input and output alternatives
signature_def_map = saved_model_export_utils.build_all_signature_defs(
input_alternatives, output_alternatives,
actual_default_output_alternative_key)
if not checkpoint_path:
# Locate the latest checkpoint
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
export_dir = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
with tf_session.Session('') as session:
variables.initialize_local_variables()
data_flow_ops.tables_initializer()
saver_for_restore = saver.Saver(
variables.global_variables(),
sharded=True)
saver_for_restore.restore(session, checkpoint_path)
init_op = control_flow_ops.group(
variables.local_variables_initializer(),
data_flow_ops.tables_initializer())
# Perform the export
builder = saved_model_builder.SavedModelBuilder(export_dir)
builder.add_meta_graph_and_variables(
session, [tag_constants.SERVING],
signature_def_map=signature_def_map,
assets_collection=ops.get_collection(
ops.GraphKeys.ASSET_FILEPATHS),
legacy_init_op=init_op)
builder.save(as_text)
# Add the extra assets
if assets_extra:
assets_extra_path = os.path.join(compat.as_bytes(export_dir),
compat.as_bytes('assets.extra'))
for dest_relative, source in assets_extra.items():
dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),
compat.as_bytes(dest_relative))
dest_path = os.path.dirname(dest_absolute)
gfile.MakeDirs(dest_path)
gfile.Copy(source, dest_absolute)
return export_dir
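# A common way to obtain serving_input_fn is the parsing helper in learn's
# input_fn_utils; `feature_spec` below is an assumed dict of
# tf.FixedLenFeature/tf.VarLenFeature parsing specs:
#
#     from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
#     serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(
#         feature_spec)
#     export_dir = estimator.export_savedmodel('/tmp/export', serving_input_fn)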
# During the deprecation period, allow direct access to x, y from Estimator.
# pylint: disable=protected-access
class SKCompat(sklearn.BaseEstimator):
"""Scikit learn wrapper for TensorFlow Learn Estimator."""
def __init__(self, estimator):
self._estimator = estimator
def fit(self, x, y, batch_size=128, steps=None, max_steps=None,
monitors=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None, feed_fn=None,
batch_size=batch_size, shuffle=True,
epochs=None)
all_monitors = []
if feed_fn:
all_monitors = [basic_session_run_hooks.FeedFnHook(feed_fn)]
if monitors:
all_monitors.extend(monitors)
self._estimator.fit(input_fn=input_fn,
steps=steps,
max_steps=max_steps,
monitors=all_monitors)
return self
def score(self, x, y, batch_size=128, steps=None, metrics=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None,
feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._estimator._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name='score')
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
def predict(self, x, batch_size=128, outputs=None):
input_fn, feed_fn = _get_input_fn(
x, None, input_fn=None, feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
results = list(
self._estimator._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=True,
iterate_batches=True))
if not isinstance(results[0], dict):
return np.concatenate([output for output in results], axis=0)
return {
key: np.concatenate(
[output[key] for output in results], axis=0)
for key in results[0]
}
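# Usage sketch for the SKCompat wrapper; `my_model_fn`, `x` and `y` are
# hypothetical (x/y being in-memory numpy arrays):
#
#     est = SKCompat(Estimator(model_fn=my_model_fn))
#     est.fit(x, y, batch_size=64, steps=500)
#     scores = est.score(x, y, batch_size=64)   # dict incl. 'global_step'
#     preds = est.predict(x, batch_size=64)     # ndarray or dict of ndarrays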
| apache-2.0 |
potash/scikit-learn | sklearn/manifold/locally_linear.py | 37 | 25852 | """Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <[email protected]>
# Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
"""Compute barycenter weights of X from Y along the first axis
We estimate the weights to assign to each point in Y[i] to recover
the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
reg : float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
"""
X = check_array(X, dtype=FLOAT_DTYPES)
Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
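# Sketch: reconstructing each sample from its neighbours with the weights
# above (Z[i] stacks the neighbours of X[i]; shapes are illustrative):
#
#     W = barycenter_weights(X, Z)              # (n_samples, n_neighbors)
#     X_rec = np.einsum('ij,ijk->ik', W, Z)     # barycentric reconstruction
#     # each row of W sums to 1: np.allclose(W.sum(axis=1), 1.0)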
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3, n_jobs=1):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
    reg : float, optional
        Amount of regularization when solving the least-squares
        problem for the barycenter weights; keeps the problem
        well-posed when the local Gram matrix is singular.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
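    Examples
    --------
    An illustrative sketch (the toy data is an assumption for demonstration):
    >>> import numpy as np
    >>> from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
    >>> X = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]])
    >>> A = barycenter_kneighbors_graph(X, n_neighbors=2)
    >>> A.shape
    (4, 4)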
"""
knn = NearestNeighbors(n_neighbors + 1, n_jobs=n_jobs).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
    max_iter : integer, optional
        Maximum number of iterations for the 'arpack' method.
        Not used if eigen_solver=='dense'.
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
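    Examples
    --------
    A small hedged sketch (the diagonal matrix below is an assumption chosen
    so that a near-null direction exists):
    >>> import numpy as np
    >>> from sklearn.manifold.locally_linear import null_space
    >>> M = np.diag([0., 1e-8, 1., 2.])
    >>> vectors, error = null_space(M, k=1, k_skip=1, eigen_solver='dense')
    >>> vectors.shape
    (4, 1)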
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None, n_jobs=1):
"""Perform a Locally Linear Embedding analysis on the data.
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
            n_neighbors > n_components * (n_components + 3) / 2.
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
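    Examples
    --------
    A hedged usage sketch (the random input below is an assumption chosen only
    to illustrate the call signature and output shape):
    >>> import numpy as np
    >>> from sklearn.manifold import locally_linear_embedding
    >>> rng = np.random.RandomState(0)
    >>> X = rng.rand(50, 3)
    >>> Y, err = locally_linear_embedding(X, n_neighbors=10, n_components=2)
    >>> Y.shape
    (50, 2)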
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1, n_jobs=n_jobs)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg, n_jobs=n_jobs)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # M = (I - W)' (I - W): add I to W'W - W' - W
elif method == 'hessian':
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float64)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float64)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
# build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1] *
U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
# find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
# choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
# find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
# calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
# find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = np.cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
# Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float64)
for i in range(N):
s_i = s_range[i]
# select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
# compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
# Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
# Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
# We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h) +
(1 - alpha_i) * w_reg[i, :, None])
# Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
# We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
            ``n_neighbors > n_components * (n_components + 3) / 2``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
    embedding_ : array-like, shape [n_samples, n_components]
        Stores the embedding vectors
    reconstruction_error_ : float
        Reconstruction error associated with `embedding_`
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
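    Examples
    --------
    A minimal sketch of the estimator interface (random data is an assumption
    used only for illustration):
    >>> import numpy as np
    >>> from sklearn.manifold import LocallyLinearEmbedding
    >>> rng = np.random.RandomState(0)
    >>> X = rng.rand(50, 3)
    >>> embedding = LocallyLinearEmbedding(n_neighbors=10, n_components=2)
    >>> embedding.fit_transform(X).shape
    (50, 2)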
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None, n_jobs=1):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
self.n_jobs = n_jobs
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm,
n_jobs=self.n_jobs)
random_state = check_random_state(self.random_state)
X = check_array(X)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state, reg=self.reg, n_jobs=self.n_jobs)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
        Because of the scaling performed by this method, using it together
        with methods that are not scale-invariant (like SVMs) is discouraged.
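        Examples
        --------
        A hedged sketch of embedding previously unseen points (the random data
        is an assumption used only for illustration):
        >>> import numpy as np
        >>> from sklearn.manifold import LocallyLinearEmbedding
        >>> rng = np.random.RandomState(0)
        >>> X = rng.rand(40, 3)
        >>> lle = LocallyLinearEmbedding(n_neighbors=5, n_components=2).fit(X)
        >>> lle.transform(rng.rand(3, 3)).shape
        (3, 2)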
"""
check_is_fitted(self, "nbrs_")
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
| bsd-3-clause |
bdh1011/wau | venv/lib/python2.7/site-packages/pandas/tseries/tests/test_period.py | 1 | 120042 | """Test suite for Period handling.
Parts derived from scikits.timeseries code, original authors:
- Pierre Gerard-Marchant & Matt Knox
- pierregm_at_uga_dot_edu - mattknow_ca_at_hotmail_dot_com
"""
from datetime import datetime, date, timedelta
from numpy.ma.testutils import assert_equal
from pandas import Timestamp
from pandas.tseries.frequencies import MONTHS, DAYS, _period_code_map
from pandas.tseries.period import Period, PeriodIndex, period_range
from pandas.tseries.index import DatetimeIndex, date_range, Index
from pandas.tseries.tools import to_datetime
import pandas.tseries.period as period
import pandas.tseries.offsets as offsets
import pandas.core.datetools as datetools
import pandas as pd
import numpy as np
from numpy.random import randn
from pandas.compat import range, lrange, lmap, zip
from pandas import Series, TimeSeries, DataFrame, _np_version_under1p9
from pandas import tslib
from pandas.util.testing import(assert_series_equal, assert_almost_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas import compat
from numpy.testing import assert_array_equal
class TestPeriodProperties(tm.TestCase):
"Test properties such as year, month, weekday, etc...."
#
def test_quarterly_negative_ordinals(self):
p = Period(ordinal=-1, freq='Q-DEC')
self.assertEqual(p.year, 1969)
self.assertEqual(p.quarter, 4)
p = Period(ordinal=-2, freq='Q-DEC')
self.assertEqual(p.year, 1969)
self.assertEqual(p.quarter, 3)
p = Period(ordinal=-2, freq='M')
self.assertEqual(p.year, 1969)
self.assertEqual(p.month, 11)
def test_period_cons_quarterly(self):
# bugs in scikits.timeseries
for month in MONTHS:
freq = 'Q-%s' % month
exp = Period('1989Q3', freq=freq)
self.assertIn('1989Q3', str(exp))
stamp = exp.to_timestamp('D', how='end')
p = Period(stamp, freq=freq)
self.assertEqual(p, exp)
def test_period_cons_annual(self):
# bugs in scikits.timeseries
for month in MONTHS:
freq = 'A-%s' % month
exp = Period('1989', freq=freq)
stamp = exp.to_timestamp('D', how='end') + timedelta(days=30)
p = Period(stamp, freq=freq)
self.assertEqual(p, exp + 1)
def test_period_cons_weekly(self):
for num in range(10, 17):
daystr = '2011-02-%d' % num
for day in DAYS:
freq = 'W-%s' % day
result = Period(daystr, freq=freq)
expected = Period(daystr, freq='D').asfreq(freq)
self.assertEqual(result, expected)
def test_period_cons_nat(self):
p = Period('NaT', freq='M')
self.assertEqual(p.ordinal, tslib.iNaT)
self.assertEqual(p.freq, 'M')
p = Period('nat', freq='W-SUN')
self.assertEqual(p.ordinal, tslib.iNaT)
self.assertEqual(p.freq, 'W-SUN')
p = Period(tslib.iNaT, freq='D')
self.assertEqual(p.ordinal, tslib.iNaT)
self.assertEqual(p.freq, 'D')
self.assertRaises(ValueError, Period, 'NaT')
def test_timestamp_tz_arg(self):
import pytz
p = Period('1/1/2005', freq='M').to_timestamp(tz='Europe/Brussels')
self.assertEqual(p.tz,
pytz.timezone('Europe/Brussels').normalize(p).tzinfo)
def test_timestamp_tz_arg_dateutil(self):
from pandas.tslib import _dateutil_gettz as gettz
from pandas.tslib import maybe_get_tz
p = Period('1/1/2005', freq='M').to_timestamp(tz=maybe_get_tz('dateutil/Europe/Brussels'))
self.assertEqual(p.tz, gettz('Europe/Brussels'))
def test_timestamp_tz_arg_dateutil_from_string(self):
from pandas.tslib import _dateutil_gettz as gettz
p = Period('1/1/2005', freq='M').to_timestamp(tz='dateutil/Europe/Brussels')
self.assertEqual(p.tz, gettz('Europe/Brussels'))
def test_timestamp_nat_tz(self):
t = Period('NaT', freq='M').to_timestamp()
self.assertTrue(t is tslib.NaT)
t = Period('NaT', freq='M').to_timestamp(tz='Asia/Tokyo')
self.assertTrue(t is tslib.NaT)
def test_period_constructor(self):
i1 = Period('1/1/2005', freq='M')
i2 = Period('Jan 2005')
self.assertEqual(i1, i2)
i1 = Period('2005', freq='A')
i2 = Period('2005')
i3 = Period('2005', freq='a')
self.assertEqual(i1, i2)
self.assertEqual(i1, i3)
i4 = Period('2005', freq='M')
i5 = Period('2005', freq='m')
self.assertRaises(ValueError, i1.__ne__, i4)
self.assertEqual(i4, i5)
i1 = Period.now('Q')
i2 = Period(datetime.now(), freq='Q')
i3 = Period.now('q')
self.assertEqual(i1, i2)
self.assertEqual(i1, i3)
# Biz day construction, roll forward if non-weekday
i1 = Period('3/10/12', freq='B')
i2 = Period('3/10/12', freq='D')
self.assertEqual(i1, i2.asfreq('B'))
i2 = Period('3/11/12', freq='D')
self.assertEqual(i1, i2.asfreq('B'))
i2 = Period('3/12/12', freq='D')
self.assertEqual(i1, i2.asfreq('B'))
i3 = Period('3/10/12', freq='b')
self.assertEqual(i1, i3)
i1 = Period(year=2005, quarter=1, freq='Q')
i2 = Period('1/1/2005', freq='Q')
self.assertEqual(i1, i2)
i1 = Period(year=2005, quarter=3, freq='Q')
i2 = Period('9/1/2005', freq='Q')
self.assertEqual(i1, i2)
i1 = Period(year=2005, month=3, day=1, freq='D')
i2 = Period('3/1/2005', freq='D')
self.assertEqual(i1, i2)
i3 = Period(year=2005, month=3, day=1, freq='d')
self.assertEqual(i1, i3)
i1 = Period(year=2012, month=3, day=10, freq='B')
i2 = Period('3/12/12', freq='B')
self.assertEqual(i1, i2)
i1 = Period('2005Q1')
i2 = Period(year=2005, quarter=1, freq='Q')
i3 = Period('2005q1')
self.assertEqual(i1, i2)
self.assertEqual(i1, i3)
i1 = Period('05Q1')
self.assertEqual(i1, i2)
lower = Period('05q1')
self.assertEqual(i1, lower)
i1 = Period('1Q2005')
self.assertEqual(i1, i2)
lower = Period('1q2005')
self.assertEqual(i1, lower)
i1 = Period('1Q05')
self.assertEqual(i1, i2)
lower = Period('1q05')
self.assertEqual(i1, lower)
i1 = Period('4Q1984')
self.assertEqual(i1.year, 1984)
lower = Period('4q1984')
self.assertEqual(i1, lower)
i1 = Period('1982', freq='min')
i2 = Period('1982', freq='MIN')
self.assertEqual(i1, i2)
i2 = Period('1982', freq=('Min', 1))
self.assertEqual(i1, i2)
expected = Period('2007-01', freq='M')
i1 = Period('200701', freq='M')
self.assertEqual(i1, expected)
i1 = Period('200701', freq='M')
self.assertEqual(i1, expected)
i1 = Period(200701, freq='M')
self.assertEqual(i1, expected)
i1 = Period(ordinal=200701, freq='M')
self.assertEqual(i1.year, 18695)
i1 = Period(datetime(2007, 1, 1), freq='M')
i2 = Period('200701', freq='M')
self.assertEqual(i1, i2)
i1 = Period(date(2007, 1, 1), freq='M')
i2 = Period(datetime(2007, 1, 1), freq='M')
i3 = Period(np.datetime64('2007-01-01'), freq='M')
i4 = Period(np.datetime64('2007-01-01 00:00:00Z'), freq='M')
i5 = Period(np.datetime64('2007-01-01 00:00:00.000Z'), freq='M')
self.assertEqual(i1, i2)
self.assertEqual(i1, i3)
self.assertEqual(i1, i4)
self.assertEqual(i1, i5)
i1 = Period('2007-01-01 09:00:00.001')
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq='L')
self.assertEqual(i1, expected)
expected = Period(np.datetime64('2007-01-01 09:00:00.001Z'), freq='L')
self.assertEqual(i1, expected)
i1 = Period('2007-01-01 09:00:00.00101')
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq='U')
self.assertEqual(i1, expected)
expected = Period(np.datetime64('2007-01-01 09:00:00.00101Z'),
freq='U')
self.assertEqual(i1, expected)
self.assertRaises(ValueError, Period, ordinal=200701)
self.assertRaises(ValueError, Period, '2007-1-1', freq='X')
def test_freq_str(self):
i1 = Period('1982', freq='Min')
self.assertNotEqual(i1.freq[0], '1')
def test_repr(self):
p = Period('Jan-2000')
self.assertIn('2000-01', repr(p))
p = Period('2000-12-15')
self.assertIn('2000-12-15', repr(p))
def test_repr_nat(self):
p = Period('nat', freq='M')
self.assertIn(repr(tslib.NaT), repr(p))
def test_millisecond_repr(self):
p = Period('2000-01-01 12:15:02.123')
self.assertEqual("Period('2000-01-01 12:15:02.123', 'L')", repr(p))
def test_microsecond_repr(self):
p = Period('2000-01-01 12:15:02.123567')
self.assertEqual("Period('2000-01-01 12:15:02.123567', 'U')", repr(p))
def test_strftime(self):
p = Period('2000-1-1 12:34:12', freq='S')
res = p.strftime('%Y-%m-%d %H:%M:%S')
self.assertEqual(res, '2000-01-01 12:34:12')
tm.assert_isinstance(res, compat.text_type) # GH3363
def test_sub_delta(self):
left, right = Period('2011', freq='A'), Period('2007', freq='A')
result = left - right
self.assertEqual(result, 4)
self.assertRaises(ValueError, left.__sub__,
Period('2007-01', freq='M'))
def test_to_timestamp(self):
p = Period('1982', freq='A')
start_ts = p.to_timestamp(how='S')
aliases = ['s', 'StarT', 'BEGIn']
for a in aliases:
self.assertEqual(start_ts, p.to_timestamp('D', how=a))
end_ts = p.to_timestamp(how='E')
aliases = ['e', 'end', 'FINIsH']
for a in aliases:
self.assertEqual(end_ts, p.to_timestamp('D', how=a))
from_lst = ['A', 'Q', 'M', 'W', 'B',
'D', 'H', 'Min', 'S']
def _ex(p):
return Timestamp((p + 1).start_time.value - 1)
for i, fcode in enumerate(from_lst):
p = Period('1982', freq=fcode)
result = p.to_timestamp().to_period(fcode)
self.assertEqual(result, p)
self.assertEqual(p.start_time, p.to_timestamp(how='S'))
self.assertEqual(p.end_time, _ex(p))
# Frequency other than daily
p = Period('1985', freq='A')
result = p.to_timestamp('H', how='end')
expected = datetime(1985, 12, 31, 23)
self.assertEqual(result, expected)
result = p.to_timestamp('T', how='end')
expected = datetime(1985, 12, 31, 23, 59)
self.assertEqual(result, expected)
result = p.to_timestamp(how='end')
expected = datetime(1985, 12, 31)
self.assertEqual(result, expected)
expected = datetime(1985, 1, 1)
result = p.to_timestamp('H', how='start')
self.assertEqual(result, expected)
result = p.to_timestamp('T', how='start')
self.assertEqual(result, expected)
result = p.to_timestamp('S', how='start')
self.assertEqual(result, expected)
assertRaisesRegexp(ValueError, 'Only mult == 1', p.to_timestamp, '5t')
p = Period('NaT', freq='W')
self.assertTrue(p.to_timestamp() is tslib.NaT)
def test_start_time(self):
freq_lst = ['A', 'Q', 'M', 'D', 'H', 'T', 'S']
xp = datetime(2012, 1, 1)
for f in freq_lst:
p = Period('2012', freq=f)
self.assertEqual(p.start_time, xp)
self.assertEqual(Period('2012', freq='B').start_time,
datetime(2012, 1, 2))
self.assertEqual(Period('2012', freq='W').start_time,
datetime(2011, 12, 26))
p = Period('NaT', freq='W')
self.assertTrue(p.start_time is tslib.NaT)
def test_end_time(self):
p = Period('2012', freq='A')
def _ex(*args):
return Timestamp(Timestamp(datetime(*args)).value - 1)
xp = _ex(2013, 1, 1)
self.assertEqual(xp, p.end_time)
p = Period('2012', freq='Q')
xp = _ex(2012, 4, 1)
self.assertEqual(xp, p.end_time)
p = Period('2012', freq='M')
xp = _ex(2012, 2, 1)
self.assertEqual(xp, p.end_time)
xp = _ex(2012, 1, 2)
p = Period('2012', freq='D')
self.assertEqual(p.end_time, xp)
xp = _ex(2012, 1, 1, 1)
p = Period('2012', freq='H')
self.assertEqual(p.end_time, xp)
xp = _ex(2012, 1, 3)
self.assertEqual(Period('2012', freq='B').end_time, xp)
xp = _ex(2012, 1, 2)
self.assertEqual(Period('2012', freq='W').end_time, xp)
p = Period('NaT', freq='W')
self.assertTrue(p.end_time is tslib.NaT)
def test_anchor_week_end_time(self):
def _ex(*args):
return Timestamp(Timestamp(datetime(*args)).value - 1)
p = Period('2013-1-1', 'W-SAT')
xp = _ex(2013, 1, 6)
self.assertEqual(p.end_time, xp)
def test_properties_annually(self):
# Test properties on Periods with annually frequency.
a_date = Period(freq='A', year=2007)
assert_equal(a_date.year, 2007)
def test_properties_quarterly(self):
        # Test properties on Periods with quarterly frequency.
qedec_date = Period(freq="Q-DEC", year=2007, quarter=1)
qejan_date = Period(freq="Q-JAN", year=2007, quarter=1)
qejun_date = Period(freq="Q-JUN", year=2007, quarter=1)
#
for x in range(3):
for qd in (qedec_date, qejan_date, qejun_date):
assert_equal((qd + x).qyear, 2007)
assert_equal((qd + x).quarter, x + 1)
def test_properties_monthly(self):
        # Test properties on Periods with monthly frequency.
m_date = Period(freq='M', year=2007, month=1)
for x in range(11):
m_ival_x = m_date + x
assert_equal(m_ival_x.year, 2007)
if 1 <= x + 1 <= 3:
assert_equal(m_ival_x.quarter, 1)
elif 4 <= x + 1 <= 6:
assert_equal(m_ival_x.quarter, 2)
elif 7 <= x + 1 <= 9:
assert_equal(m_ival_x.quarter, 3)
elif 10 <= x + 1 <= 12:
assert_equal(m_ival_x.quarter, 4)
assert_equal(m_ival_x.month, x + 1)
def test_properties_weekly(self):
        # Test properties on Periods with weekly frequency.
w_date = Period(freq='WK', year=2007, month=1, day=7)
#
assert_equal(w_date.year, 2007)
assert_equal(w_date.quarter, 1)
assert_equal(w_date.month, 1)
assert_equal(w_date.week, 1)
assert_equal((w_date - 1).week, 52)
assert_equal(w_date.days_in_month, 31)
assert_equal(Period(freq='WK', year=2012, month=2, day=1).days_in_month, 29)
def test_properties_daily(self):
# Test properties on Periods with daily frequency.
b_date = Period(freq='B', year=2007, month=1, day=1)
#
assert_equal(b_date.year, 2007)
assert_equal(b_date.quarter, 1)
assert_equal(b_date.month, 1)
assert_equal(b_date.day, 1)
assert_equal(b_date.weekday, 0)
assert_equal(b_date.dayofyear, 1)
assert_equal(b_date.days_in_month, 31)
assert_equal(Period(freq='B', year=2012, month=2, day=1).days_in_month, 29)
#
d_date = Period(freq='D', year=2007, month=1, day=1)
#
assert_equal(d_date.year, 2007)
assert_equal(d_date.quarter, 1)
assert_equal(d_date.month, 1)
assert_equal(d_date.day, 1)
assert_equal(d_date.weekday, 0)
assert_equal(d_date.dayofyear, 1)
assert_equal(d_date.days_in_month, 31)
assert_equal(Period(freq='D', year=2012, month=2,
day=1).days_in_month, 29)
def test_properties_hourly(self):
# Test properties on Periods with hourly frequency.
h_date = Period(freq='H', year=2007, month=1, day=1, hour=0)
#
assert_equal(h_date.year, 2007)
assert_equal(h_date.quarter, 1)
assert_equal(h_date.month, 1)
assert_equal(h_date.day, 1)
assert_equal(h_date.weekday, 0)
assert_equal(h_date.dayofyear, 1)
assert_equal(h_date.hour, 0)
assert_equal(h_date.days_in_month, 31)
assert_equal(Period(freq='H', year=2012, month=2, day=1,
hour=0).days_in_month, 29)
#
def test_properties_minutely(self):
# Test properties on Periods with minutely frequency.
t_date = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=0)
#
assert_equal(t_date.quarter, 1)
assert_equal(t_date.month, 1)
assert_equal(t_date.day, 1)
assert_equal(t_date.weekday, 0)
assert_equal(t_date.dayofyear, 1)
assert_equal(t_date.hour, 0)
assert_equal(t_date.minute, 0)
assert_equal(t_date.days_in_month, 31)
assert_equal(Period(freq='D', year=2012, month=2, day=1, hour=0,
minute=0).days_in_month, 29)
def test_properties_secondly(self):
# Test properties on Periods with secondly frequency.
s_date = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
#
assert_equal(s_date.year, 2007)
assert_equal(s_date.quarter, 1)
assert_equal(s_date.month, 1)
assert_equal(s_date.day, 1)
assert_equal(s_date.weekday, 0)
assert_equal(s_date.dayofyear, 1)
assert_equal(s_date.hour, 0)
assert_equal(s_date.minute, 0)
assert_equal(s_date.second, 0)
assert_equal(s_date.days_in_month, 31)
assert_equal(Period(freq='Min', year=2012, month=2, day=1, hour=0,
minute=0, second=0).days_in_month, 29)
def test_properties_nat(self):
p_nat = Period('NaT', freq='M')
t_nat = pd.Timestamp('NaT')
# confirm Period('NaT') work identical with Timestamp('NaT')
for f in ['year', 'month', 'day', 'hour', 'minute', 'second',
'week', 'dayofyear', 'quarter', 'days_in_month']:
self.assertTrue(np.isnan(getattr(p_nat, f)))
self.assertTrue(np.isnan(getattr(t_nat, f)))
for f in ['weekofyear', 'dayofweek', 'weekday', 'qyear']:
self.assertTrue(np.isnan(getattr(p_nat, f)))
def test_pnow(self):
dt = datetime.now()
val = period.pnow('D')
exp = Period(dt, freq='D')
self.assertEqual(val, exp)
def test_constructor_corner(self):
self.assertRaises(ValueError, Period, year=2007, month=1,
freq='2M')
self.assertRaises(ValueError, Period, datetime.now())
self.assertRaises(ValueError, Period, datetime.now().date())
self.assertRaises(ValueError, Period, 1.6, freq='D')
self.assertRaises(ValueError, Period, ordinal=1.6, freq='D')
self.assertRaises(ValueError, Period, ordinal=2, value=1, freq='D')
self.assertRaises(ValueError, Period)
self.assertRaises(ValueError, Period, month=1)
p = Period('2007-01-01', freq='D')
result = Period(p, freq='A')
exp = Period('2007', freq='A')
self.assertEqual(result, exp)
def test_constructor_infer_freq(self):
p = Period('2007-01-01')
self.assertEqual(p.freq, 'D')
p = Period('2007-01-01 07')
self.assertEqual(p.freq, 'H')
p = Period('2007-01-01 07:10')
self.assertEqual(p.freq, 'T')
p = Period('2007-01-01 07:10:15')
self.assertEqual(p.freq, 'S')
p = Period('2007-01-01 07:10:15.123')
self.assertEqual(p.freq, 'L')
p = Period('2007-01-01 07:10:15.123000')
self.assertEqual(p.freq, 'L')
p = Period('2007-01-01 07:10:15.123400')
self.assertEqual(p.freq, 'U')
def test_asfreq_MS(self):
initial = Period("2013")
self.assertEqual(initial.asfreq(freq="M", how="S"), Period('2013-01', 'M'))
self.assertRaises(ValueError, initial.asfreq, freq="MS", how="S")
tm.assertRaisesRegexp(ValueError, "Unknown freqstr: MS", pd.Period, '2013-01', 'MS')
self.assertTrue(_period_code_map.get("MS") is None)
def noWrap(item):
return item
class TestFreqConversion(tm.TestCase):
"Test frequency conversion of date objects"
def test_asfreq_corner(self):
val = Period(freq='A', year=2007)
self.assertRaises(ValueError, val.asfreq, '5t')
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='WK', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
assert_equal(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start)
assert_equal(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
assert_equal(ival_A.asfreq('M', 's'), ival_A_to_M_start)
assert_equal(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
assert_equal(ival_A.asfreq('WK', 'S'), ival_A_to_W_start)
assert_equal(ival_A.asfreq('WK', 'E'), ival_A_to_W_end)
assert_equal(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
assert_equal(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
assert_equal(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
assert_equal(ival_A.asfreq('D', 'E'), ival_A_to_D_end)
assert_equal(ival_A.asfreq('H', 'S'), ival_A_to_H_start)
assert_equal(ival_A.asfreq('H', 'E'), ival_A_to_H_end)
assert_equal(ival_A.asfreq('min', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('min', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('T', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('T', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('S', 'S'), ival_A_to_S_start)
assert_equal(ival_A.asfreq('S', 'E'), ival_A_to_S_end)
assert_equal(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start)
assert_equal(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end)
assert_equal(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start)
assert_equal(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end)
assert_equal(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start)
assert_equal(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end)
assert_equal(ival_A.asfreq('A'), ival_A)
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='WK', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30)
assert_equal(ival_Q.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q_end_of_year.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q.asfreq('M', 'S'), ival_Q_to_M_start)
assert_equal(ival_Q.asfreq('M', 'E'), ival_Q_to_M_end)
assert_equal(ival_Q.asfreq('WK', 'S'), ival_Q_to_W_start)
assert_equal(ival_Q.asfreq('WK', 'E'), ival_Q_to_W_end)
assert_equal(ival_Q.asfreq('B', 'S'), ival_Q_to_B_start)
assert_equal(ival_Q.asfreq('B', 'E'), ival_Q_to_B_end)
assert_equal(ival_Q.asfreq('D', 'S'), ival_Q_to_D_start)
assert_equal(ival_Q.asfreq('D', 'E'), ival_Q_to_D_end)
assert_equal(ival_Q.asfreq('H', 'S'), ival_Q_to_H_start)
assert_equal(ival_Q.asfreq('H', 'E'), ival_Q_to_H_end)
assert_equal(ival_Q.asfreq('Min', 'S'), ival_Q_to_T_start)
assert_equal(ival_Q.asfreq('Min', 'E'), ival_Q_to_T_end)
assert_equal(ival_Q.asfreq('S', 'S'), ival_Q_to_S_start)
assert_equal(ival_Q.asfreq('S', 'E'), ival_Q_to_S_end)
assert_equal(ival_QEJAN.asfreq('D', 'S'), ival_QEJAN_to_D_start)
assert_equal(ival_QEJAN.asfreq('D', 'E'), ival_QEJAN_to_D_end)
assert_equal(ival_QEJUN.asfreq('D', 'S'), ival_QEJUN_to_D_start)
assert_equal(ival_QEJUN.asfreq('D', 'E'), ival_QEJUN_to_D_end)
assert_equal(ival_Q.asfreq('Q'), ival_Q)
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq='M', year=2007, month=1)
ival_M_end_of_year = Period(freq='M', year=2007, month=12)
ival_M_end_of_quarter = Period(freq='M', year=2007, month=3)
ival_M_to_A = Period(freq='A', year=2007)
ival_M_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_M_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq='WK', year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_M_to_H_end = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_M_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_M_to_T_end = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_M_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_M_to_S_end = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
assert_equal(ival_M.asfreq('A'), ival_M_to_A)
assert_equal(ival_M_end_of_year.asfreq('A'), ival_M_to_A)
assert_equal(ival_M.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M_end_of_quarter.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M.asfreq('WK', 'S'), ival_M_to_W_start)
assert_equal(ival_M.asfreq('WK', 'E'), ival_M_to_W_end)
assert_equal(ival_M.asfreq('B', 'S'), ival_M_to_B_start)
assert_equal(ival_M.asfreq('B', 'E'), ival_M_to_B_end)
assert_equal(ival_M.asfreq('D', 'S'), ival_M_to_D_start)
assert_equal(ival_M.asfreq('D', 'E'), ival_M_to_D_end)
assert_equal(ival_M.asfreq('H', 'S'), ival_M_to_H_start)
assert_equal(ival_M.asfreq('H', 'E'), ival_M_to_H_end)
assert_equal(ival_M.asfreq('Min', 'S'), ival_M_to_T_start)
assert_equal(ival_M.asfreq('Min', 'E'), ival_M_to_T_end)
assert_equal(ival_M.asfreq('S', 'S'), ival_M_to_S_start)
assert_equal(ival_M.asfreq('S', 'E'), ival_M_to_S_end)
assert_equal(ival_M.asfreq('M'), ival_M)
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq='WK', year=2007, month=1, day=1)
ival_WSUN = Period(freq='WK', year=2007, month=1, day=7)
ival_WSAT = Period(freq='WK-SAT', year=2007, month=1, day=6)
ival_WFRI = Period(freq='WK-FRI', year=2007, month=1, day=5)
ival_WTHU = Period(freq='WK-THU', year=2007, month=1, day=4)
ival_WWED = Period(freq='WK-WED', year=2007, month=1, day=3)
ival_WTUE = Period(freq='WK-TUE', year=2007, month=1, day=2)
ival_WMON = Period(freq='WK-MON', year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq='D', year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq='D', year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq='D', year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq='D', year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq='D', year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq='D', year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq='D', year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq='D', year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq='D', year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq='D', year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq='D', year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq='WK', year=2007, month=12, day=31)
ival_W_end_of_quarter = Period(freq='WK', year=2007, month=3, day=31)
ival_W_end_of_month = Period(freq='WK', year=2007, month=1, day=31)
ival_W_to_A = Period(freq='A', year=2007)
ival_W_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_W_to_M = Period(freq='M', year=2007, month=1)
if Period(freq='D', year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq='A', year=2007)
else:
ival_W_to_A_end_of_year = Period(freq='A', year=2008)
if Period(freq='D', year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=2)
if Period(freq='D', year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=2)
ival_W_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq='B', year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_W_to_H_end = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_W_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_W_to_T_end = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_W_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_W_to_S_end = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
assert_equal(ival_W.asfreq('A'), ival_W_to_A)
assert_equal(ival_W_end_of_year.asfreq('A'),
ival_W_to_A_end_of_year)
assert_equal(ival_W.asfreq('Q'), ival_W_to_Q)
assert_equal(ival_W_end_of_quarter.asfreq('Q'),
ival_W_to_Q_end_of_quarter)
assert_equal(ival_W.asfreq('M'), ival_W_to_M)
assert_equal(ival_W_end_of_month.asfreq('M'),
ival_W_to_M_end_of_month)
assert_equal(ival_W.asfreq('B', 'S'), ival_W_to_B_start)
assert_equal(ival_W.asfreq('B', 'E'), ival_W_to_B_end)
assert_equal(ival_W.asfreq('D', 'S'), ival_W_to_D_start)
assert_equal(ival_W.asfreq('D', 'E'), ival_W_to_D_end)
assert_equal(ival_WSUN.asfreq('D', 'S'), ival_WSUN_to_D_start)
assert_equal(ival_WSUN.asfreq('D', 'E'), ival_WSUN_to_D_end)
assert_equal(ival_WSAT.asfreq('D', 'S'), ival_WSAT_to_D_start)
assert_equal(ival_WSAT.asfreq('D', 'E'), ival_WSAT_to_D_end)
assert_equal(ival_WFRI.asfreq('D', 'S'), ival_WFRI_to_D_start)
assert_equal(ival_WFRI.asfreq('D', 'E'), ival_WFRI_to_D_end)
assert_equal(ival_WTHU.asfreq('D', 'S'), ival_WTHU_to_D_start)
assert_equal(ival_WTHU.asfreq('D', 'E'), ival_WTHU_to_D_end)
assert_equal(ival_WWED.asfreq('D', 'S'), ival_WWED_to_D_start)
assert_equal(ival_WWED.asfreq('D', 'E'), ival_WWED_to_D_end)
assert_equal(ival_WTUE.asfreq('D', 'S'), ival_WTUE_to_D_start)
assert_equal(ival_WTUE.asfreq('D', 'E'), ival_WTUE_to_D_end)
assert_equal(ival_WMON.asfreq('D', 'S'), ival_WMON_to_D_start)
assert_equal(ival_WMON.asfreq('D', 'E'), ival_WMON_to_D_end)
assert_equal(ival_W.asfreq('H', 'S'), ival_W_to_H_start)
assert_equal(ival_W.asfreq('H', 'E'), ival_W_to_H_end)
assert_equal(ival_W.asfreq('Min', 'S'), ival_W_to_T_start)
assert_equal(ival_W.asfreq('Min', 'E'), ival_W_to_T_end)
assert_equal(ival_W.asfreq('S', 'S'), ival_W_to_S_start)
assert_equal(ival_W.asfreq('S', 'E'), ival_W_to_S_end)
assert_equal(ival_W.asfreq('WK'), ival_W)
def test_conv_business(self):
        # frequency conversion tests: from Business Frequency
ival_B = Period(freq='B', year=2007, month=1, day=1)
ival_B_end_of_year = Period(freq='B', year=2007, month=12, day=31)
ival_B_end_of_quarter = Period(freq='B', year=2007, month=3, day=30)
ival_B_end_of_month = Period(freq='B', year=2007, month=1, day=31)
ival_B_end_of_week = Period(freq='B', year=2007, month=1, day=5)
ival_B_to_A = Period(freq='A', year=2007)
ival_B_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_B_to_M = Period(freq='M', year=2007, month=1)
ival_B_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_B_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_B_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_B_to_H_end = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_B_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_B_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_B_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_B_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
assert_equal(ival_B.asfreq('A'), ival_B_to_A)
assert_equal(ival_B_end_of_year.asfreq('A'), ival_B_to_A)
assert_equal(ival_B.asfreq('Q'), ival_B_to_Q)
assert_equal(ival_B_end_of_quarter.asfreq('Q'), ival_B_to_Q)
assert_equal(ival_B.asfreq('M'), ival_B_to_M)
assert_equal(ival_B_end_of_month.asfreq('M'), ival_B_to_M)
assert_equal(ival_B.asfreq('WK'), ival_B_to_W)
assert_equal(ival_B_end_of_week.asfreq('WK'), ival_B_to_W)
assert_equal(ival_B.asfreq('D'), ival_B_to_D)
assert_equal(ival_B.asfreq('H', 'S'), ival_B_to_H_start)
assert_equal(ival_B.asfreq('H', 'E'), ival_B_to_H_end)
assert_equal(ival_B.asfreq('Min', 'S'), ival_B_to_T_start)
assert_equal(ival_B.asfreq('Min', 'E'), ival_B_to_T_end)
assert_equal(ival_B.asfreq('S', 'S'), ival_B_to_S_start)
assert_equal(ival_B.asfreq('S', 'E'), ival_B_to_S_end)
assert_equal(ival_B.asfreq('B'), ival_B)
def test_conv_daily(self):
        # frequency conversion tests: from Daily Frequency
ival_D = Period(freq='D', year=2007, month=1, day=1)
ival_D_end_of_year = Period(freq='D', year=2007, month=12, day=31)
ival_D_end_of_quarter = Period(freq='D', year=2007, month=3, day=31)
ival_D_end_of_month = Period(freq='D', year=2007, month=1, day=31)
ival_D_end_of_week = Period(freq='D', year=2007, month=1, day=7)
ival_D_friday = Period(freq='D', year=2007, month=1, day=5)
ival_D_saturday = Period(freq='D', year=2007, month=1, day=6)
ival_D_sunday = Period(freq='D', year=2007, month=1, day=7)
ival_D_monday = Period(freq='D', year=2007, month=1, day=8)
ival_B_friday = Period(freq='B', year=2007, month=1, day=5)
ival_B_monday = Period(freq='B', year=2007, month=1, day=8)
ival_D_to_A = Period(freq='A', year=2007)
ival_Deoq_to_AJAN = Period(freq='A-JAN', year=2008)
ival_Deoq_to_AJUN = Period(freq='A-JUN', year=2007)
ival_Deoq_to_ADEC = Period(freq='A-DEC', year=2007)
ival_D_to_QEJAN = Period(freq="Q-JAN", year=2007, quarter=4)
ival_D_to_QEJUN = Period(freq="Q-JUN", year=2007, quarter=3)
ival_D_to_QEDEC = Period(freq="Q-DEC", year=2007, quarter=1)
ival_D_to_M = Period(freq='M', year=2007, month=1)
ival_D_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_D_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_D_to_H_end = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_D_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_D_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_D_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_D_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
assert_equal(ival_D.asfreq('A'), ival_D_to_A)
assert_equal(ival_D_end_of_quarter.asfreq('A-JAN'),
ival_Deoq_to_AJAN)
assert_equal(ival_D_end_of_quarter.asfreq('A-JUN'),
ival_Deoq_to_AJUN)
assert_equal(ival_D_end_of_quarter.asfreq('A-DEC'),
ival_Deoq_to_ADEC)
assert_equal(ival_D_end_of_year.asfreq('A'), ival_D_to_A)
assert_equal(ival_D_end_of_quarter.asfreq('Q'), ival_D_to_QEDEC)
assert_equal(ival_D.asfreq("Q-JAN"), ival_D_to_QEJAN)
assert_equal(ival_D.asfreq("Q-JUN"), ival_D_to_QEJUN)
assert_equal(ival_D.asfreq("Q-DEC"), ival_D_to_QEDEC)
assert_equal(ival_D.asfreq('M'), ival_D_to_M)
assert_equal(ival_D_end_of_month.asfreq('M'), ival_D_to_M)
assert_equal(ival_D.asfreq('WK'), ival_D_to_W)
assert_equal(ival_D_end_of_week.asfreq('WK'), ival_D_to_W)
assert_equal(ival_D_friday.asfreq('B'), ival_B_friday)
assert_equal(ival_D_saturday.asfreq('B', 'S'), ival_B_friday)
assert_equal(ival_D_saturday.asfreq('B', 'E'), ival_B_monday)
assert_equal(ival_D_sunday.asfreq('B', 'S'), ival_B_friday)
assert_equal(ival_D_sunday.asfreq('B', 'E'), ival_B_monday)
assert_equal(ival_D.asfreq('H', 'S'), ival_D_to_H_start)
assert_equal(ival_D.asfreq('H', 'E'), ival_D_to_H_end)
assert_equal(ival_D.asfreq('Min', 'S'), ival_D_to_T_start)
assert_equal(ival_D.asfreq('Min', 'E'), ival_D_to_T_end)
assert_equal(ival_D.asfreq('S', 'S'), ival_D_to_S_start)
assert_equal(ival_D.asfreq('S', 'E'), ival_D_to_S_end)
assert_equal(ival_D.asfreq('D'), ival_D)
def test_conv_hourly(self):
        # frequency conversion tests: from Hourly Frequency
ival_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_H_end_of_year = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_H_end_of_quarter = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_H_end_of_month = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_H_end_of_week = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_H_end_of_day = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_H_end_of_bus = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_H_to_A = Period(freq='A', year=2007)
ival_H_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_H_to_M = Period(freq='M', year=2007, month=1)
ival_H_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_H_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_H_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_H_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_H_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=59)
ival_H_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_H_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=59, second=59)
assert_equal(ival_H.asfreq('A'), ival_H_to_A)
assert_equal(ival_H_end_of_year.asfreq('A'), ival_H_to_A)
assert_equal(ival_H.asfreq('Q'), ival_H_to_Q)
assert_equal(ival_H_end_of_quarter.asfreq('Q'), ival_H_to_Q)
assert_equal(ival_H.asfreq('M'), ival_H_to_M)
assert_equal(ival_H_end_of_month.asfreq('M'), ival_H_to_M)
assert_equal(ival_H.asfreq('WK'), ival_H_to_W)
assert_equal(ival_H_end_of_week.asfreq('WK'), ival_H_to_W)
assert_equal(ival_H.asfreq('D'), ival_H_to_D)
assert_equal(ival_H_end_of_day.asfreq('D'), ival_H_to_D)
assert_equal(ival_H.asfreq('B'), ival_H_to_B)
assert_equal(ival_H_end_of_bus.asfreq('B'), ival_H_to_B)
assert_equal(ival_H.asfreq('Min', 'S'), ival_H_to_T_start)
assert_equal(ival_H.asfreq('Min', 'E'), ival_H_to_T_end)
assert_equal(ival_H.asfreq('S', 'S'), ival_H_to_S_start)
assert_equal(ival_H.asfreq('S', 'E'), ival_H_to_S_end)
assert_equal(ival_H.asfreq('H'), ival_H)
def test_conv_minutely(self):
        # frequency conversion tests: from Minutely Frequency
ival_T = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_T_end_of_year = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_T_end_of_quarter = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_T_end_of_month = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_T_end_of_week = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_T_end_of_day = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_T_end_of_bus = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_T_end_of_hour = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=59)
ival_T_to_A = Period(freq='A', year=2007)
ival_T_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_T_to_M = Period(freq='M', year=2007, month=1)
ival_T_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_T_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_T_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_T_to_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_T_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_T_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=59)
assert_equal(ival_T.asfreq('A'), ival_T_to_A)
assert_equal(ival_T_end_of_year.asfreq('A'), ival_T_to_A)
assert_equal(ival_T.asfreq('Q'), ival_T_to_Q)
assert_equal(ival_T_end_of_quarter.asfreq('Q'), ival_T_to_Q)
assert_equal(ival_T.asfreq('M'), ival_T_to_M)
assert_equal(ival_T_end_of_month.asfreq('M'), ival_T_to_M)
assert_equal(ival_T.asfreq('WK'), ival_T_to_W)
assert_equal(ival_T_end_of_week.asfreq('WK'), ival_T_to_W)
assert_equal(ival_T.asfreq('D'), ival_T_to_D)
assert_equal(ival_T_end_of_day.asfreq('D'), ival_T_to_D)
assert_equal(ival_T.asfreq('B'), ival_T_to_B)
assert_equal(ival_T_end_of_bus.asfreq('B'), ival_T_to_B)
assert_equal(ival_T.asfreq('H'), ival_T_to_H)
assert_equal(ival_T_end_of_hour.asfreq('H'), ival_T_to_H)
assert_equal(ival_T.asfreq('S', 'S'), ival_T_to_S_start)
assert_equal(ival_T.asfreq('S', 'E'), ival_T_to_S_end)
assert_equal(ival_T.asfreq('Min'), ival_T)
def test_conv_secondly(self):
        # frequency conversion tests: from Secondly Frequency
ival_S = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_S_end_of_year = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_quarter = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_month = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_week = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
ival_S_end_of_day = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
ival_S_end_of_bus = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
ival_S_end_of_hour = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=59, second=59)
ival_S_end_of_minute = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=59)
ival_S_to_A = Period(freq='A', year=2007)
ival_S_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_S_to_M = Period(freq='M', year=2007, month=1)
ival_S_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_S_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_S_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_S_to_H = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_S_to_T = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
assert_equal(ival_S.asfreq('A'), ival_S_to_A)
assert_equal(ival_S_end_of_year.asfreq('A'), ival_S_to_A)
assert_equal(ival_S.asfreq('Q'), ival_S_to_Q)
assert_equal(ival_S_end_of_quarter.asfreq('Q'), ival_S_to_Q)
assert_equal(ival_S.asfreq('M'), ival_S_to_M)
assert_equal(ival_S_end_of_month.asfreq('M'), ival_S_to_M)
assert_equal(ival_S.asfreq('WK'), ival_S_to_W)
assert_equal(ival_S_end_of_week.asfreq('WK'), ival_S_to_W)
assert_equal(ival_S.asfreq('D'), ival_S_to_D)
assert_equal(ival_S_end_of_day.asfreq('D'), ival_S_to_D)
assert_equal(ival_S.asfreq('B'), ival_S_to_B)
assert_equal(ival_S_end_of_bus.asfreq('B'), ival_S_to_B)
assert_equal(ival_S.asfreq('H'), ival_S_to_H)
assert_equal(ival_S_end_of_hour.asfreq('H'), ival_S_to_H)
assert_equal(ival_S.asfreq('Min'), ival_S_to_T)
assert_equal(ival_S_end_of_minute.asfreq('Min'), ival_S_to_T)
assert_equal(ival_S.asfreq('S'), ival_S)
def test_asfreq_nat(self):
p = Period('NaT', freq='A')
result = p.asfreq('M')
self.assertEqual(result.ordinal, tslib.iNaT)
self.assertEqual(result.freq, 'M')
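# The TestPeriodIndex class below covers PeriodIndex construction (from field
# arrays, Period arrays, start/end/periods), indexing and partial-string
# slicing, frequency conversion, and round-trips to timestamps/DatetimeIndex.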
class TestPeriodIndex(tm.TestCase):
def setUp(self):
pass
def test_hash_error(self):
index = period_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_make_time_series(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index)
tm.assert_isinstance(series, TimeSeries)
def test_astype(self):
idx = period_range('1990', '2009', freq='A')
result = idx.astype('i8')
self.assert_numpy_array_equal(result, idx.values)
def test_constructor_use_start_freq(self):
# GH #1118
p = Period('4/2/2012', freq='B')
index = PeriodIndex(start=p, periods=10)
expected = PeriodIndex(start='4/2/2012', periods=10, freq='B')
self.assertTrue(index.equals(expected))
def test_constructor_field_arrays(self):
# GH #1264
years = np.arange(1990, 2010).repeat(4)[2:-2]
quarters = np.tile(np.arange(1, 5), 20)[2:-2]
index = PeriodIndex(year=years, quarter=quarters, freq='Q-DEC')
expected = period_range('1990Q3', '2009Q2', freq='Q-DEC')
self.assertTrue(index.equals(expected))
self.assertRaises(
ValueError, PeriodIndex, year=years, quarter=quarters,
freq='2Q-DEC')
index = PeriodIndex(year=years, quarter=quarters)
self.assertTrue(index.equals(expected))
years = [2007, 2007, 2007]
months = [1, 2]
self.assertRaises(ValueError, PeriodIndex, year=years, month=months,
freq='M')
self.assertRaises(ValueError, PeriodIndex, year=years, month=months,
freq='2M')
self.assertRaises(ValueError, PeriodIndex, year=years, month=months,
freq='M', start=Period('2007-01', freq='M'))
years = [2007, 2007, 2007]
months = [1, 2, 3]
idx = PeriodIndex(year=years, month=months, freq='M')
exp = period_range('2007-01', periods=3, freq='M')
self.assertTrue(idx.equals(exp))
def test_constructor_U(self):
# U was used as undefined period
self.assertRaises(ValueError, period_range, '2007-1-1', periods=500,
freq='X')
def test_constructor_arrays_negative_year(self):
years = np.arange(1960, 2000).repeat(4)
quarters = np.tile(lrange(1, 5), 40)
pindex = PeriodIndex(year=years, quarter=quarters)
self.assert_numpy_array_equal(pindex.year, years)
self.assert_numpy_array_equal(pindex.quarter, quarters)
def test_constructor_invalid_quarters(self):
self.assertRaises(ValueError, PeriodIndex, year=lrange(2000, 2004),
quarter=lrange(4), freq='Q-DEC')
def test_constructor_corner(self):
self.assertRaises(ValueError, PeriodIndex, periods=10, freq='A')
start = Period('2007', freq='A-JUN')
end = Period('2010', freq='A-DEC')
self.assertRaises(ValueError, PeriodIndex, start=start, end=end)
self.assertRaises(ValueError, PeriodIndex, start=start)
self.assertRaises(ValueError, PeriodIndex, end=end)
result = period_range('2007-01', periods=10.5, freq='M')
exp = period_range('2007-01', periods=10, freq='M')
self.assertTrue(result.equals(exp))
def test_constructor_fromarraylike(self):
idx = period_range('2007-01', periods=20, freq='M')
self.assertRaises(ValueError, PeriodIndex, idx.values)
self.assertRaises(ValueError, PeriodIndex, list(idx.values))
self.assertRaises(ValueError, PeriodIndex,
data=Period('2007', freq='A'))
result = PeriodIndex(iter(idx))
self.assertTrue(result.equals(idx))
result = PeriodIndex(idx)
self.assertTrue(result.equals(idx))
result = PeriodIndex(idx, freq='M')
self.assertTrue(result.equals(idx))
result = PeriodIndex(idx, freq='D')
exp = idx.asfreq('D', 'e')
self.assertTrue(result.equals(exp))
def test_constructor_datetime64arr(self):
vals = np.arange(100000, 100000 + 10000, 100, dtype=np.int64)
vals = vals.view(np.dtype('M8[us]'))
self.assertRaises(ValueError, PeriodIndex, vals, freq='D')
def test_constructor_simple_new(self):
idx = period_range('2007-01', name='p', periods=20, freq='M')
result = idx._simple_new(idx, 'p', freq=idx.freq)
self.assertTrue(result.equals(idx))
result = idx._simple_new(idx.astype('i8'), 'p', freq=idx.freq)
self.assertTrue(result.equals(idx))
def test_constructor_nat(self):
self.assertRaises(
ValueError, period_range, start='NaT', end='2011-01-01', freq='M')
self.assertRaises(
ValueError, period_range, start='2011-01-01', end='NaT', freq='M')
def test_constructor_year_and_quarter(self):
year = pd.Series([2001, 2002, 2003])
quarter = year - 2000
idx = PeriodIndex(year=year, quarter=quarter)
strs = ['%dQ%d' % t for t in zip(quarter, year)]
lops = list(map(Period, strs))
p = PeriodIndex(lops)
tm.assert_index_equal(p, idx)
def test_is_(self):
create_index = lambda: PeriodIndex(freq='A', start='1/1/2001',
end='12/1/2009')
index = create_index()
self.assertEqual(index.is_(index), True)
self.assertEqual(index.is_(create_index()), False)
self.assertEqual(index.is_(index.view()), True)
self.assertEqual(index.is_(index.view().view().view().view().view()), True)
self.assertEqual(index.view().is_(index), True)
ind2 = index.view()
index.name = "Apple"
self.assertEqual(ind2.is_(index), True)
self.assertEqual(index.is_(index[:]), False)
self.assertEqual(index.is_(index.asfreq('M')), False)
self.assertEqual(index.is_(index.asfreq('A')), False)
self.assertEqual(index.is_(index - 2), False)
self.assertEqual(index.is_(index - 0), False)
def test_comp_period(self):
idx = period_range('2007-01', periods=20, freq='M')
result = idx < idx[10]
exp = idx.values < idx.values[10]
self.assert_numpy_array_equal(result, exp)
def test_getitem_ndim2(self):
idx = period_range('2007-01', periods=3, freq='M')
result = idx[:, None]
# MPL kludge
tm.assert_isinstance(result, PeriodIndex)
def test_getitem_partial(self):
rng = period_range('2007-01', periods=50, freq='M')
ts = Series(np.random.randn(len(rng)), rng)
self.assertRaises(KeyError, ts.__getitem__, '2006')
result = ts['2008']
self.assertTrue((result.index.year == 2008).all())
result = ts['2008':'2009']
self.assertEqual(len(result), 24)
result = ts['2008-1':'2009-12']
self.assertEqual(len(result), 24)
result = ts['2008Q1':'2009Q4']
self.assertEqual(len(result), 24)
result = ts[:'2009']
self.assertEqual(len(result), 36)
result = ts['2009':]
self.assertEqual(len(result), 50 - 24)
exp = result
result = ts[24:]
assert_series_equal(exp, result)
ts = ts[10:].append(ts[10:])
self.assertRaisesRegexp(
KeyError, "left slice bound for non-unique label: '2008'",
ts.__getitem__, slice('2008', '2009'))
def test_getitem_datetime(self):
rng = period_range(start='2012-01-01', periods=10, freq='W-MON')
ts = Series(lrange(len(rng)), index=rng)
dt1 = datetime(2011, 10, 2)
dt4 = datetime(2012, 4, 20)
rs = ts[dt1:dt4]
assert_series_equal(rs, ts)
def test_slice_with_negative_step(self):
ts = Series(np.arange(20),
period_range('2014-01', periods=20, freq='M'))
SLC = pd.IndexSlice
def assert_slices_equivalent(l_slc, i_slc):
assert_series_equal(ts[l_slc], ts.iloc[i_slc])
assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])
assert_series_equal(ts.ix[l_slc], ts.iloc[i_slc])
assert_slices_equivalent(SLC[Period('2014-10')::-1], SLC[9::-1])
assert_slices_equivalent(SLC['2014-10'::-1], SLC[9::-1])
assert_slices_equivalent(SLC[:Period('2014-10'):-1], SLC[:8:-1])
assert_slices_equivalent(SLC[:'2014-10':-1], SLC[:8:-1])
assert_slices_equivalent(SLC['2015-02':'2014-10':-1], SLC[13:8:-1])
assert_slices_equivalent(SLC[Period('2015-02'):Period('2014-10'):-1], SLC[13:8:-1])
assert_slices_equivalent(SLC['2015-02':Period('2014-10'):-1], SLC[13:8:-1])
assert_slices_equivalent(SLC[Period('2015-02'):'2014-10':-1], SLC[13:8:-1])
assert_slices_equivalent(SLC['2014-10':'2015-02':-1], SLC[:0])
def test_slice_with_zero_step_raises(self):
ts = Series(np.arange(20),
period_range('2014-01', periods=20, freq='M'))
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts.loc[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts.ix[::0])
def test_sub(self):
rng = period_range('2007-01', periods=50)
result = rng - 5
exp = rng + (-5)
self.assertTrue(result.equals(exp))
def test_periods_number_check(self):
self.assertRaises(
ValueError, period_range, '2011-1-1', '2012-1-1', 'B')
def test_tolist(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
rs = index.tolist()
[tm.assert_isinstance(x, Period) for x in rs]
recon = PeriodIndex(rs)
self.assertTrue(index.equals(recon))
def test_to_timestamp(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index, name='foo')
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC')
result = series.to_timestamp(how='end')
self.assertTrue(result.index.equals(exp_index))
self.assertEqual(result.name, 'foo')
exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-JAN')
result = series.to_timestamp(how='start')
self.assertTrue(result.index.equals(exp_index))
def _get_with_delta(delta, freq='A-DEC'):
return date_range(to_datetime('1/1/2001') + delta,
to_datetime('12/31/2009') + delta, freq=freq)
delta = timedelta(hours=23)
result = series.to_timestamp('H', 'end')
exp_index = _get_with_delta(delta)
self.assertTrue(result.index.equals(exp_index))
delta = timedelta(hours=23, minutes=59)
result = series.to_timestamp('T', 'end')
exp_index = _get_with_delta(delta)
self.assertTrue(result.index.equals(exp_index))
result = series.to_timestamp('S', 'end')
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
self.assertTrue(result.index.equals(exp_index))
self.assertRaises(ValueError, index.to_timestamp, '5t')
index = PeriodIndex(freq='H', start='1/1/2001', end='1/2/2001')
series = Series(1, index=index, name='foo')
exp_index = date_range('1/1/2001 00:59:59', end='1/2/2001 00:59:59',
freq='H')
result = series.to_timestamp(how='end')
self.assertTrue(result.index.equals(exp_index))
self.assertEqual(result.name, 'foo')
def test_to_timestamp_quarterly_bug(self):
years = np.arange(1960, 2000).repeat(4)
quarters = np.tile(lrange(1, 5), 40)
pindex = PeriodIndex(year=years, quarter=quarters)
stamps = pindex.to_timestamp('D', 'end')
expected = DatetimeIndex([x.to_timestamp('D', 'end') for x in pindex])
self.assertTrue(stamps.equals(expected))
def test_to_timestamp_preserve_name(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009',
name='foo')
self.assertEqual(index.name, 'foo')
conv = index.to_timestamp('D')
self.assertEqual(conv.name, 'foo')
def test_to_timestamp_repr_is_code(self):
        zs = [Timestamp('99-04-17 00:00:00', tz='UTC'),
              Timestamp('2001-04-17 00:00:00', tz='UTC'),
              Timestamp('2001-04-17 00:00:00', tz='America/Los_Angeles'),
              Timestamp('2001-04-17 00:00:00', tz=None)]
        for z in zs:
            self.assertEqual(eval(repr(z)), z)
def test_to_timestamp_period_nat(self):
# GH 7228
index = PeriodIndex(['NaT', '2011-01', '2011-02'], freq='M', name='idx')
result = index.to_timestamp('D')
expected = DatetimeIndex([pd.NaT, datetime(2011, 1, 1),
datetime(2011, 2, 1)], name='idx')
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, 'idx')
result2 = result.to_period(freq='M')
self.assertTrue(result2.equals(index))
self.assertEqual(result2.name, 'idx')
def test_as_frame_columns(self):
rng = period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
ts = df[rng[0]]
assert_series_equal(ts, df.ix[:, 0])
# GH # 1211
repr(df)
ts = df['1/1/2000']
assert_series_equal(ts, df.ix[:, 0])
def test_indexing(self):
# GH 4390, iat incorrectly indexing
index = period_range('1/1/2001', periods=10)
s = Series(randn(10), index=index)
expected = s[index[0]]
result = s.iat[0]
self.assertEqual(expected, result)
def test_frame_setitem(self):
rng = period_range('1/1/2000', periods=5)
rng.name = 'index'
df = DataFrame(randn(5, 3), index=rng)
df['Index'] = rng
rs = Index(df['Index'])
self.assertTrue(rs.equals(rng))
rs = df.reset_index().set_index('index')
tm.assert_isinstance(rs.index, PeriodIndex)
self.assertTrue(rs.index.equals(rng))
def test_period_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = period_range('2011/01/01', periods=6, freq='M')
idx2 = period_range('2013', periods=6, freq='A')
df = df.set_index(idx1)
self.assertTrue(df.index.equals(idx1))
df = df.set_index(idx2)
self.assertTrue(df.index.equals(idx2))
def test_nested_dict_frame_constructor(self):
rng = period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def test_frame_to_time_stamp(self):
K = 5
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
df = DataFrame(randn(len(index), K), index=index)
df['mix'] = 'a'
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC')
result = df.to_timestamp('D', 'end')
self.assertTrue(result.index.equals(exp_index))
assert_almost_equal(result.values, df.values)
exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-JAN')
result = df.to_timestamp('D', 'start')
self.assertTrue(result.index.equals(exp_index))
def _get_with_delta(delta, freq='A-DEC'):
return date_range(to_datetime('1/1/2001') + delta,
to_datetime('12/31/2009') + delta, freq=freq)
delta = timedelta(hours=23)
result = df.to_timestamp('H', 'end')
exp_index = _get_with_delta(delta)
self.assertTrue(result.index.equals(exp_index))
delta = timedelta(hours=23, minutes=59)
result = df.to_timestamp('T', 'end')
exp_index = _get_with_delta(delta)
self.assertTrue(result.index.equals(exp_index))
result = df.to_timestamp('S', 'end')
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
self.assertTrue(result.index.equals(exp_index))
# columns
df = df.T
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC')
result = df.to_timestamp('D', 'end', axis=1)
self.assertTrue(result.columns.equals(exp_index))
assert_almost_equal(result.values, df.values)
exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-JAN')
result = df.to_timestamp('D', 'start', axis=1)
self.assertTrue(result.columns.equals(exp_index))
delta = timedelta(hours=23)
result = df.to_timestamp('H', 'end', axis=1)
exp_index = _get_with_delta(delta)
self.assertTrue(result.columns.equals(exp_index))
delta = timedelta(hours=23, minutes=59)
result = df.to_timestamp('T', 'end', axis=1)
exp_index = _get_with_delta(delta)
self.assertTrue(result.columns.equals(exp_index))
result = df.to_timestamp('S', 'end', axis=1)
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
self.assertTrue(result.columns.equals(exp_index))
# invalid axis
assertRaisesRegexp(ValueError, 'axis', df.to_timestamp, axis=2)
assertRaisesRegexp(ValueError, 'Only mult == 1', df.to_timestamp, '5t', axis=1)
def test_index_duplicate_periods(self):
# monotonic
idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq='A-JUN')
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts[2007]
expected = ts[1:3]
assert_series_equal(result, expected)
result[:] = 1
self.assertTrue((ts[1:3] == 1).all())
# not monotonic
idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq='A-JUN')
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts[2007]
expected = ts[idx == 2007]
assert_series_equal(result, expected)
def test_index_unique(self):
idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq='A-JUN')
expected = PeriodIndex([2000, 2007, 2009], freq='A-JUN')
self.assert_numpy_array_equal(idx.unique(), expected.values)
self.assertEqual(idx.nunique(), 3)
idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq='A-JUN', tz='US/Eastern')
expected = PeriodIndex([2000, 2007, 2009], freq='A-JUN', tz='US/Eastern')
self.assert_numpy_array_equal(idx.unique(), expected.values)
self.assertEqual(idx.nunique(), 3)
def test_constructor(self):
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
assert_equal(len(pi), 9)
pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009')
assert_equal(len(pi), 4 * 9)
pi = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
assert_equal(len(pi), 12 * 9)
pi = PeriodIndex(freq='D', start='1/1/2001', end='12/31/2009')
assert_equal(len(pi), 365 * 9 + 2)
pi = PeriodIndex(freq='B', start='1/1/2001', end='12/31/2009')
assert_equal(len(pi), 261 * 9)
pi = PeriodIndex(freq='H', start='1/1/2001', end='12/31/2001 23:00')
assert_equal(len(pi), 365 * 24)
pi = PeriodIndex(freq='Min', start='1/1/2001', end='1/1/2001 23:59')
assert_equal(len(pi), 24 * 60)
pi = PeriodIndex(freq='S', start='1/1/2001', end='1/1/2001 23:59:59')
assert_equal(len(pi), 24 * 60 * 60)
start = Period('02-Apr-2005', 'B')
i1 = PeriodIndex(start=start, periods=20)
assert_equal(len(i1), 20)
assert_equal(i1.freq, start.freq)
assert_equal(i1[0], start)
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), 10)
assert_equal(i1.freq, end_intv.freq)
assert_equal(i1[-1], end_intv)
end_intv = Period('2006-12-31', '1w')
i2 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), len(i2))
self.assertTrue((i1 == i2).all())
assert_equal(i1.freq, i2.freq)
end_intv = Period('2006-12-31', ('w', 1))
i2 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), len(i2))
self.assertTrue((i1 == i2).all())
assert_equal(i1.freq, i2.freq)
try:
PeriodIndex(start=start, end=end_intv)
raise AssertionError('Cannot allow mixed freq for start and end')
except ValueError:
pass
end_intv = Period('2005-05-01', 'B')
i1 = PeriodIndex(start=start, end=end_intv)
try:
PeriodIndex(start=start)
raise AssertionError(
'Must specify periods if missing start or end')
except ValueError:
pass
# infer freq from first element
i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')])
assert_equal(len(i2), 2)
assert_equal(i2[0], end_intv)
i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')]))
assert_equal(len(i2), 2)
assert_equal(i2[0], end_intv)
# Mixed freq should fail
vals = [end_intv, Period('2006-12-31', 'w')]
self.assertRaises(ValueError, PeriodIndex, vals)
vals = np.array(vals)
self.assertRaises(ValueError, PeriodIndex, vals)
def test_shift(self):
pi1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='A', start='1/1/2002', end='12/1/2010')
self.assertTrue(pi1.shift(0).equals(pi1))
assert_equal(len(pi1), len(pi2))
assert_equal(pi1.shift(1).values, pi2.values)
pi1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='A', start='1/1/2000', end='12/1/2008')
assert_equal(len(pi1), len(pi2))
assert_equal(pi1.shift(-1).values, pi2.values)
pi1 = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='M', start='2/1/2001', end='1/1/2010')
assert_equal(len(pi1), len(pi2))
assert_equal(pi1.shift(1).values, pi2.values)
pi1 = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='M', start='12/1/2000', end='11/1/2009')
assert_equal(len(pi1), len(pi2))
assert_equal(pi1.shift(-1).values, pi2.values)
pi1 = PeriodIndex(freq='D', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='D', start='1/2/2001', end='12/2/2009')
assert_equal(len(pi1), len(pi2))
assert_equal(pi1.shift(1).values, pi2.values)
pi1 = PeriodIndex(freq='D', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='D', start='12/31/2000', end='11/30/2009')
assert_equal(len(pi1), len(pi2))
assert_equal(pi1.shift(-1).values, pi2.values)
def test_shift_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M', name='idx')
result = idx.shift(1)
expected = PeriodIndex(['2011-02', '2011-03', 'NaT', '2011-05'], freq='M', name='idx')
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
def test_asfreq(self):
pi1 = PeriodIndex(freq='A', start='1/1/2001', end='1/1/2001')
pi2 = PeriodIndex(freq='Q', start='1/1/2001', end='1/1/2001')
pi3 = PeriodIndex(freq='M', start='1/1/2001', end='1/1/2001')
pi4 = PeriodIndex(freq='D', start='1/1/2001', end='1/1/2001')
pi5 = PeriodIndex(freq='H', start='1/1/2001', end='1/1/2001 00:00')
pi6 = PeriodIndex(freq='Min', start='1/1/2001', end='1/1/2001 00:00')
pi7 = PeriodIndex(freq='S', start='1/1/2001', end='1/1/2001 00:00:00')
self.assertEqual(pi1.asfreq('Q', 'S'), pi2)
self.assertEqual(pi1.asfreq('Q', 's'), pi2)
self.assertEqual(pi1.asfreq('M', 'start'), pi3)
self.assertEqual(pi1.asfreq('D', 'StarT'), pi4)
self.assertEqual(pi1.asfreq('H', 'beGIN'), pi5)
self.assertEqual(pi1.asfreq('Min', 'S'), pi6)
self.assertEqual(pi1.asfreq('S', 'S'), pi7)
self.assertEqual(pi2.asfreq('A', 'S'), pi1)
self.assertEqual(pi2.asfreq('M', 'S'), pi3)
self.assertEqual(pi2.asfreq('D', 'S'), pi4)
self.assertEqual(pi2.asfreq('H', 'S'), pi5)
self.assertEqual(pi2.asfreq('Min', 'S'), pi6)
self.assertEqual(pi2.asfreq('S', 'S'), pi7)
self.assertEqual(pi3.asfreq('A', 'S'), pi1)
self.assertEqual(pi3.asfreq('Q', 'S'), pi2)
self.assertEqual(pi3.asfreq('D', 'S'), pi4)
self.assertEqual(pi3.asfreq('H', 'S'), pi5)
self.assertEqual(pi3.asfreq('Min', 'S'), pi6)
self.assertEqual(pi3.asfreq('S', 'S'), pi7)
self.assertEqual(pi4.asfreq('A', 'S'), pi1)
self.assertEqual(pi4.asfreq('Q', 'S'), pi2)
self.assertEqual(pi4.asfreq('M', 'S'), pi3)
self.assertEqual(pi4.asfreq('H', 'S'), pi5)
self.assertEqual(pi4.asfreq('Min', 'S'), pi6)
self.assertEqual(pi4.asfreq('S', 'S'), pi7)
self.assertEqual(pi5.asfreq('A', 'S'), pi1)
self.assertEqual(pi5.asfreq('Q', 'S'), pi2)
self.assertEqual(pi5.asfreq('M', 'S'), pi3)
self.assertEqual(pi5.asfreq('D', 'S'), pi4)
self.assertEqual(pi5.asfreq('Min', 'S'), pi6)
self.assertEqual(pi5.asfreq('S', 'S'), pi7)
self.assertEqual(pi6.asfreq('A', 'S'), pi1)
self.assertEqual(pi6.asfreq('Q', 'S'), pi2)
self.assertEqual(pi6.asfreq('M', 'S'), pi3)
self.assertEqual(pi6.asfreq('D', 'S'), pi4)
self.assertEqual(pi6.asfreq('H', 'S'), pi5)
self.assertEqual(pi6.asfreq('S', 'S'), pi7)
self.assertEqual(pi7.asfreq('A', 'S'), pi1)
self.assertEqual(pi7.asfreq('Q', 'S'), pi2)
self.assertEqual(pi7.asfreq('M', 'S'), pi3)
self.assertEqual(pi7.asfreq('D', 'S'), pi4)
self.assertEqual(pi7.asfreq('H', 'S'), pi5)
self.assertEqual(pi7.asfreq('Min', 'S'), pi6)
self.assertRaises(ValueError, pi7.asfreq, 'T', 'foo')
self.assertRaises(ValueError, pi1.asfreq, '5t')
def test_asfreq_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M')
result = idx.asfreq(freq='Q')
expected = PeriodIndex(['2011Q1', '2011Q1', 'NaT', '2011Q2'], freq='Q')
self.assertTrue(result.equals(expected))
def test_period_index_length(self):
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
assert_equal(len(pi), 9)
pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009')
assert_equal(len(pi), 4 * 9)
pi = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
assert_equal(len(pi), 12 * 9)
start = Period('02-Apr-2005', 'B')
i1 = PeriodIndex(start=start, periods=20)
assert_equal(len(i1), 20)
assert_equal(i1.freq, start.freq)
assert_equal(i1[0], start)
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), 10)
assert_equal(i1.freq, end_intv.freq)
assert_equal(i1[-1], end_intv)
end_intv = Period('2006-12-31', '1w')
i2 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), len(i2))
self.assertTrue((i1 == i2).all())
assert_equal(i1.freq, i2.freq)
end_intv = Period('2006-12-31', ('w', 1))
i2 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), len(i2))
self.assertTrue((i1 == i2).all())
assert_equal(i1.freq, i2.freq)
try:
PeriodIndex(start=start, end=end_intv)
raise AssertionError('Cannot allow mixed freq for start and end')
except ValueError:
pass
end_intv = Period('2005-05-01', 'B')
i1 = PeriodIndex(start=start, end=end_intv)
try:
PeriodIndex(start=start)
raise AssertionError(
'Must specify periods if missing start or end')
except ValueError:
pass
# infer freq from first element
i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')])
assert_equal(len(i2), 2)
assert_equal(i2[0], end_intv)
i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')]))
assert_equal(len(i2), 2)
assert_equal(i2[0], end_intv)
# Mixed freq should fail
vals = [end_intv, Period('2006-12-31', 'w')]
self.assertRaises(ValueError, PeriodIndex, vals)
vals = np.array(vals)
self.assertRaises(ValueError, PeriodIndex, vals)
def test_frame_index_to_string(self):
index = PeriodIndex(['2011-1', '2011-2', '2011-3'], freq='M')
frame = DataFrame(np.random.randn(3, 4), index=index)
# it works!
frame.to_string()
def test_asfreq_ts(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/31/2010')
ts = Series(np.random.randn(len(index)), index=index)
df = DataFrame(np.random.randn(len(index), 3), index=index)
result = ts.asfreq('D', how='end')
df_result = df.asfreq('D', how='end')
exp_index = index.asfreq('D', how='end')
self.assertEqual(len(result), len(ts))
self.assertTrue(result.index.equals(exp_index))
self.assertTrue(df_result.index.equals(exp_index))
result = ts.asfreq('D', how='start')
self.assertEqual(len(result), len(ts))
self.assertTrue(result.index.equals(index.asfreq('D', how='start')))
def test_badinput(self):
self.assertRaises(datetools.DateParseError, Period, '1/1/-2000', 'A')
# self.assertRaises(datetools.DateParseError, Period, '-2000', 'A')
# self.assertRaises(datetools.DateParseError, Period, '0', 'A')
def test_negative_ordinals(self):
p = Period(ordinal=-1000, freq='A')
p = Period(ordinal=0, freq='A')
idx1 = PeriodIndex(ordinal=[-1, 0, 1], freq='A')
idx2 = PeriodIndex(ordinal=np.array([-1, 0, 1]), freq='A')
        assert_array_equal(idx1, idx2)
def test_dti_to_period(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
pi1 = dti.to_period()
pi2 = dti.to_period(freq='D')
self.assertEqual(pi1[0], Period('Jan 2005', freq='M'))
self.assertEqual(pi2[0], Period('1/31/2005', freq='D'))
self.assertEqual(pi1[-1], Period('Nov 2005', freq='M'))
self.assertEqual(pi2[-1], Period('11/30/2005', freq='D'))
def test_pindex_slice_index(self):
pi = PeriodIndex(start='1/1/10', end='12/31/12', freq='M')
s = Series(np.random.rand(len(pi)), index=pi)
res = s['2010']
exp = s[0:12]
assert_series_equal(res, exp)
res = s['2011']
exp = s[12:24]
assert_series_equal(res, exp)
def test_getitem_day(self):
# GH 6716
# Confirm DatetimeIndex and PeriodIndex works identically
didx = DatetimeIndex(start='2013/01/01', freq='D', periods=400)
pidx = PeriodIndex(start='2013/01/01', freq='D', periods=400)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = ['2014', '2013/02', '2013/01/02',
'2013/02/01 9H', '2013/02/01 09:00']
for v in values:
if _np_version_under1p9:
with tm.assertRaises(ValueError):
idx[v]
else:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
#with tm.assertRaises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
assert_series_equal(s['2013/01'], s[0:31])
assert_series_equal(s['2013/02'], s[31:59])
assert_series_equal(s['2014'], s[365:])
invalid = ['2013/02/01 9H', '2013/02/01 09:00']
for v in invalid:
with tm.assertRaises(KeyError):
s[v]
def test_range_slice_day(self):
# GH 6716
didx = DatetimeIndex(start='2013/01/01', freq='D', periods=400)
pidx = PeriodIndex(start='2013/01/01', freq='D', periods=400)
for idx in [didx, pidx]:
# slices against index should raise IndexError
values = ['2014', '2013/02', '2013/01/02',
'2013/02/01 9H', '2013/02/01 09:00']
for v in values:
with tm.assertRaises(IndexError):
idx[v:]
s = Series(np.random.rand(len(idx)), index=idx)
assert_series_equal(s['2013/01/02':], s[1:])
assert_series_equal(s['2013/01/02':'2013/01/05'], s[1:5])
assert_series_equal(s['2013/02':], s[31:])
assert_series_equal(s['2014':], s[365:])
invalid = ['2013/02/01 9H', '2013/02/01 09:00']
for v in invalid:
with tm.assertRaises(IndexError):
idx[v:]
def test_getitem_seconds(self):
# GH 6716
didx = DatetimeIndex(start='2013/01/01 09:00:00', freq='S', periods=4000)
pidx = PeriodIndex(start='2013/01/01 09:00:00', freq='S', periods=4000)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = ['2014', '2013/02', '2013/01/02',
'2013/02/01 9H', '2013/02/01 09:00']
for v in values:
if _np_version_under1p9:
with tm.assertRaises(ValueError):
idx[v]
else:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
#with tm.assertRaises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
assert_series_equal(s['2013/01/01 10:00'], s[3600:3660])
assert_series_equal(s['2013/01/01 9H'], s[:3600])
for d in ['2013/01/01', '2013/01', '2013']:
assert_series_equal(s[d], s)
def test_range_slice_seconds(self):
# GH 6716
didx = DatetimeIndex(start='2013/01/01 09:00:00', freq='S', periods=4000)
pidx = PeriodIndex(start='2013/01/01 09:00:00', freq='S', periods=4000)
for idx in [didx, pidx]:
# slices against index should raise IndexError
values = ['2014', '2013/02', '2013/01/02',
'2013/02/01 9H', '2013/02/01 09:00']
for v in values:
with tm.assertRaises(IndexError):
idx[v:]
s = Series(np.random.rand(len(idx)), index=idx)
assert_series_equal(s['2013/01/01 09:05':'2013/01/01 09:10'], s[300:660])
assert_series_equal(s['2013/01/01 10:00':'2013/01/01 10:05'], s[3600:3960])
assert_series_equal(s['2013/01/01 10H':], s[3600:])
assert_series_equal(s[:'2013/01/01 09:30'], s[:1860])
for d in ['2013/01/01', '2013/01', '2013']:
assert_series_equal(s[d:], s)
def test_range_slice_outofbounds(self):
# GH 5407
didx = DatetimeIndex(start='2013/10/01', freq='D', periods=10)
pidx = PeriodIndex(start='2013/10/01', freq='D', periods=10)
for idx in [didx, pidx]:
df = DataFrame(dict(units=[100 + i for i in range(10)]), index=idx)
empty = DataFrame(index=idx.__class__([], freq='D'), columns=['units'])
empty['units'] = empty['units'].astype('int64')
tm.assert_frame_equal(df['2013/09/01':'2013/09/30'], empty)
tm.assert_frame_equal(df['2013/09/30':'2013/10/02'], df.iloc[:2])
tm.assert_frame_equal(df['2013/10/01':'2013/10/02'], df.iloc[:2])
tm.assert_frame_equal(df['2013/10/02':'2013/09/30'], empty)
tm.assert_frame_equal(df['2013/10/15':'2013/10/17'], empty)
tm.assert_frame_equal(df['2013-06':'2013-09'], empty)
tm.assert_frame_equal(df['2013-11':'2013-12'], empty)
def test_pindex_fieldaccessor_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2012-03', '2012-04'], freq='D')
self.assert_numpy_array_equal(idx.year, np.array([2011, 2011, -1, 2012, 2012]))
self.assert_numpy_array_equal(idx.month, np.array([1, 2, -1, 3, 4]))
def test_pindex_qaccess(self):
pi = PeriodIndex(['2Q05', '3Q05', '4Q05', '1Q06', '2Q06'], freq='Q')
s = Series(np.random.rand(len(pi)), index=pi).cumsum()
# Todo: fix these accessors!
self.assertEqual(s['05Q4'], s[2])
def test_period_dt64_round_trip(self):
dti = date_range('1/1/2000', '1/7/2002', freq='B')
pi = dti.to_period()
self.assertTrue(pi.to_timestamp().equals(dti))
dti = date_range('1/1/2000', '1/7/2002', freq='B')
pi = dti.to_period(freq='H')
self.assertTrue(pi.to_timestamp().equals(dti))
def test_to_period_quarterly(self):
# make sure we can make the round trip
for month in MONTHS:
freq = 'Q-%s' % month
rng = period_range('1989Q3', '1991Q3', freq=freq)
stamps = rng.to_timestamp()
result = stamps.to_period(freq)
self.assertTrue(rng.equals(result))
def test_to_period_quarterlyish(self):
offsets = ['BQ', 'QS', 'BQS']
for off in offsets:
rng = date_range('01-Jan-2012', periods=8, freq=off)
prng = rng.to_period()
self.assertEqual(prng.freq, 'Q-DEC')
def test_to_period_annualish(self):
offsets = ['BA', 'AS', 'BAS']
for off in offsets:
rng = date_range('01-Jan-2012', periods=8, freq=off)
prng = rng.to_period()
self.assertEqual(prng.freq, 'A-DEC')
def test_to_period_monthish(self):
offsets = ['MS', 'EOM', 'BM']
for off in offsets:
rng = date_range('01-Jan-2012', periods=8, freq=off)
prng = rng.to_period()
self.assertEqual(prng.freq, 'M')
def test_no_multiples(self):
self.assertRaises(ValueError, period_range, '1989Q3', periods=10,
freq='2Q')
self.assertRaises(ValueError, period_range, '1989', periods=10,
freq='2A')
self.assertRaises(ValueError, Period, '1989', freq='2A')
# def test_pindex_multiples(self):
# pi = PeriodIndex(start='1/1/10', end='12/31/12', freq='2M')
# self.assertEqual(pi[0], Period('1/1/10', '2M'))
# self.assertEqual(pi[1], Period('3/1/10', '2M'))
# self.assertEqual(pi[0].asfreq('6M'), pi[2].asfreq('6M'))
# self.assertEqual(pi[0].asfreq('A'), pi[2].asfreq('A'))
# self.assertEqual(pi[0].asfreq('M', how='S'),
# Period('Jan 2010', '1M'))
# self.assertEqual(pi[0].asfreq('M', how='E'),
# Period('Feb 2010', '1M'))
# self.assertEqual(pi[1].asfreq('M', how='S'),
# Period('Mar 2010', '1M'))
# i = Period('1/1/2010 12:05:18', '5S')
# self.assertEqual(i, Period('1/1/2010 12:05:15', '5S'))
# i = Period('1/1/2010 12:05:18', '5S')
# self.assertEqual(i.asfreq('1S', how='E'),
# Period('1/1/2010 12:05:19', '1S'))
def test_iteration(self):
index = PeriodIndex(start='1/1/10', periods=4, freq='B')
result = list(index)
tm.assert_isinstance(result[0], Period)
self.assertEqual(result[0].freq, index.freq)
def test_take(self):
index = PeriodIndex(start='1/1/10', end='12/31/12', freq='D', name='idx')
expected = PeriodIndex([datetime(2010, 1, 6), datetime(2010, 1, 7),
datetime(2010, 1, 9), datetime(2010, 1, 13)],
freq='D', name='idx')
taken1 = index.take([5, 6, 8, 12])
taken2 = index[[5, 6, 8, 12]]
for taken in [taken1, taken2]:
self.assertTrue(taken.equals(expected))
tm.assert_isinstance(taken, PeriodIndex)
self.assertEqual(taken.freq, index.freq)
self.assertEqual(taken.name, expected.name)
def test_joins(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
for kind in ['inner', 'outer', 'left', 'right']:
joined = index.join(index[:-5], how=kind)
tm.assert_isinstance(joined, PeriodIndex)
self.assertEqual(joined.freq, index.freq)
def test_join_self(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
for kind in ['inner', 'outer', 'left', 'right']:
res = index.join(index, how=kind)
self.assertIs(index, res)
def test_join_does_not_recur(self):
df = tm.makeCustomDataframe(3, 2, data_gen_f=lambda *args:
np.random.randint(2), c_idx_type='p',
r_idx_type='dt')
s = df.iloc[:2, 0]
res = s.index.join(df.columns, how='outer')
expected = Index([s.index[0], s.index[1],
df.columns[0], df.columns[1]], object)
tm.assert_index_equal(res, expected)
def test_align_series(self):
rng = period_range('1/1/2000', '1/1/2010', freq='A')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected[1::2] = np.nan
assert_series_equal(result, expected)
result = ts + _permute(ts[::2])
assert_series_equal(result, expected)
# it works!
for kind in ['inner', 'outer', 'left', 'right']:
ts.align(ts[::2], join=kind)
with assertRaisesRegexp(ValueError, 'Only like-indexed'):
ts + ts.asfreq('D', how="end")
def test_align_frame(self):
rng = period_range('1/1/2000', '1/1/2010', freq='A')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.values[1::2] = np.nan
tm.assert_frame_equal(result, expected)
result = ts + _permute(ts[::2])
tm.assert_frame_equal(result, expected)
def test_union(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
result = index[:-5].union(index[10:])
self.assertTrue(result.equals(index))
# not in order
result = _permute(index[:-5]).union(_permute(index[10:]))
self.assertTrue(result.equals(index))
# raise if different frequencies
index = period_range('1/1/2000', '1/20/2000', freq='D')
index2 = period_range('1/1/2000', '1/20/2000', freq='W-WED')
self.assertRaises(ValueError, index.union, index2)
self.assertRaises(ValueError, index.join, index.to_timestamp())
def test_intersection(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
result = index[:-5].intersection(index[10:])
self.assertTrue(result.equals(index[10:-5]))
# not in order
left = _permute(index[:-5])
right = _permute(index[10:])
result = left.intersection(right).order()
self.assertTrue(result.equals(index[10:-5]))
# raise if different frequencies
index = period_range('1/1/2000', '1/20/2000', freq='D')
index2 = period_range('1/1/2000', '1/20/2000', freq='W-WED')
self.assertRaises(ValueError, index.intersection, index2)
def test_fields(self):
# year, month, day, hour, minute
# second, weekofyear, week, dayofweek, weekday, dayofyear, quarter
# qyear
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2005')
self._check_all_fields(pi)
pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2002')
self._check_all_fields(pi)
pi = PeriodIndex(freq='M', start='1/1/2001', end='1/1/2002')
self._check_all_fields(pi)
pi = PeriodIndex(freq='D', start='12/1/2001', end='6/1/2001')
self._check_all_fields(pi)
pi = PeriodIndex(freq='B', start='12/1/2001', end='6/1/2001')
self._check_all_fields(pi)
pi = PeriodIndex(freq='H', start='12/31/2001', end='1/1/2002 23:00')
self._check_all_fields(pi)
pi = PeriodIndex(freq='Min', start='12/31/2001', end='1/1/2002 00:20')
self._check_all_fields(pi)
pi = PeriodIndex(freq='S', start='12/31/2001 00:00:00',
end='12/31/2001 00:05:00')
self._check_all_fields(pi)
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
self._check_all_fields(i1)
def _check_all_fields(self, periodindex):
fields = ['year', 'month', 'day', 'hour', 'minute',
'second', 'weekofyear', 'week', 'dayofweek',
'weekday', 'dayofyear', 'quarter', 'qyear', 'days_in_month']
periods = list(periodindex)
for field in fields:
field_idx = getattr(periodindex, field)
assert_equal(len(periodindex), len(field_idx))
for x, val in zip(periods, field_idx):
assert_equal(getattr(x, field), val)
def test_is_full(self):
index = PeriodIndex([2005, 2007, 2009], freq='A')
self.assertFalse(index.is_full)
index = PeriodIndex([2005, 2006, 2007], freq='A')
self.assertTrue(index.is_full)
index = PeriodIndex([2005, 2005, 2007], freq='A')
self.assertFalse(index.is_full)
index = PeriodIndex([2005, 2005, 2006], freq='A')
self.assertTrue(index.is_full)
index = PeriodIndex([2006, 2005, 2005], freq='A')
self.assertRaises(ValueError, getattr, index, 'is_full')
self.assertTrue(index[:0].is_full)
def test_map(self):
index = PeriodIndex([2005, 2007, 2009], freq='A')
result = index.map(lambda x: x + 1)
expected = index + 1
self.assertTrue(result.equals(expected))
result = index.map(lambda x: x.ordinal)
exp = [x.ordinal for x in index]
assert_array_equal(result, exp)
def test_map_with_string_constructor(self):
raw = [2005, 2007, 2009]
index = PeriodIndex(raw, freq='A')
types = str,
if compat.PY3:
# unicode
types += compat.text_type,
for t in types:
expected = np.array(lmap(t, raw), dtype=object)
res = index.map(t)
# should return an array
tm.assert_isinstance(res, np.ndarray)
# preserve element types
self.assertTrue(all(isinstance(resi, t) for resi in res))
# dtype should be object
self.assertEqual(res.dtype, np.dtype('object').type)
# lastly, values should compare equal
assert_array_equal(res, expected)
def test_convert_array_of_periods(self):
rng = period_range('1/1/2000', periods=20, freq='D')
periods = list(rng)
result = pd.Index(periods)
tm.assert_isinstance(result, PeriodIndex)
def test_with_multi_index(self):
# #1705
index = date_range('1/1/2012', periods=4, freq='12H')
index_as_arrays = [index.to_period(freq='D'), index.hour]
s = Series([0, 1, 2, 3], index_as_arrays)
tm.assert_isinstance(s.index.levels[0], PeriodIndex)
tm.assert_isinstance(s.index.values[0][0], Period)
def test_to_datetime_1703(self):
index = period_range('1/1/2012', periods=4, freq='D')
result = index.to_datetime()
self.assertEqual(result[0], Timestamp('1/1/2012'))
def test_get_loc_msg(self):
idx = period_range('2000-1-1', freq='A', periods=10)
bad_period = Period('2012', 'A')
self.assertRaises(KeyError, idx.get_loc, bad_period)
try:
idx.get_loc(bad_period)
except KeyError as inst:
self.assertEqual(inst.args[0], bad_period)
def test_append_concat(self):
# #1815
d1 = date_range('12/31/1990', '12/31/1999', freq='A-DEC')
d2 = date_range('12/31/2000', '12/31/2009', freq='A-DEC')
s1 = Series(np.random.randn(10), d1)
s2 = Series(np.random.randn(10), d2)
s1 = s1.to_period()
s2 = s2.to_period()
# drops index
result = pd.concat([s1, s2])
tm.assert_isinstance(result.index, PeriodIndex)
self.assertEqual(result.index[0], s1.index[0])
def test_pickle_freq(self):
# GH2891
import pickle
prng = period_range('1/1/2011', '1/1/2012', freq='M')
new_prng = self.round_trip_pickle(prng)
        self.assertEqual(new_prng.freq, 'M')
def test_slice_keep_name(self):
idx = period_range('20010101', periods=10, freq='D', name='bob')
self.assertEqual(idx.name, idx[1:].name)
def test_factorize(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
exp_arr = np.array([0, 0, 1, 1, 2, 2])
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
arr, idx = idx1.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
idx2 = pd.PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
exp_arr = np.array([2, 2, 1, 0, 2, 0])
arr, idx = idx2.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
exp_arr = np.array([0, 0, 1, 2, 0, 2])
exp_idx = PeriodIndex(['2014-03', '2014-02', '2014-01'], freq='M')
arr, idx = idx2.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
def test_recreate_from_data(self):
for o in ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'N', 'H']:
org = PeriodIndex(start='2001/04/01', freq=o, periods=1)
idx = PeriodIndex(org.values, freq=o)
self.assertTrue(idx.equals(org))
def test_combine_first(self):
# GH 3367
didx = pd.DatetimeIndex(start='1950-01-31', end='1950-07-31', freq='M')
pidx = pd.PeriodIndex(start=pd.Period('1950-1'), end=pd.Period('1950-7'), freq='M')
# check to be consistent with DatetimeIndex
for idx in [didx, pidx]:
a = pd.Series([1, np.nan, np.nan, 4, 5, np.nan, 7], index=idx)
b = pd.Series([9, 9, 9, 9, 9, 9, 9], index=idx)
result = a.combine_first(b)
expected = pd.Series([1, 9, 9, 4, 5, 9, 7], index=idx, dtype=np.float64)
tm.assert_series_equal(result, expected)
def test_searchsorted(self):
pidx = pd.period_range('2014-01-01', periods=10, freq='D')
self.assertEqual(
pidx.searchsorted(pd.Period('2014-01-01', freq='D')), 0)
self.assertRaisesRegexp(
ValueError, 'Different period frequency: H',
lambda: pidx.searchsorted(pd.Period('2014-01-01', freq='H')))
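# Helper used by the union/intersection and alignment tests in this module:
# returns the object with its elements in a random order, so those tests can
# check that set operations are insensitive to input ordering.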
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
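# Scalar Period arithmetic tests: integer addition, DateOffset/timedelta
# addition and subtraction, and NaT (iNaT ordinal) propagation.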
class TestMethods(tm.TestCase):
"Base test class for MaskedArrays."
def test_add(self):
dt1 = Period(freq='D', year=2008, month=1, day=1)
dt2 = Period(freq='D', year=2008, month=1, day=2)
assert_equal(dt1 + 1, dt2)
#
# GH 4731
msg = "unsupported operand type\(s\)"
with tm.assertRaisesRegexp(TypeError, msg):
dt1 + "str"
with tm.assertRaisesRegexp(TypeError, msg):
dt1 + dt2
def test_add_offset(self):
# freq is DateOffset
p = Period('2011', freq='A')
self.assertEqual(p + offsets.YearEnd(2), Period('2013', freq='A'))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
p + o
p = Period('2011-03', freq='M')
self.assertEqual(p + offsets.MonthEnd(2), Period('2011-05', freq='M'))
self.assertEqual(p + offsets.MonthEnd(12), Period('2012-03', freq='M'))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
p + o
# freq is Tick
p = Period('2011-04-01', freq='D')
self.assertEqual(p + offsets.Day(5), Period('2011-04-06', freq='D'))
self.assertEqual(p + offsets.Hour(24), Period('2011-04-02', freq='D'))
self.assertEqual(p + np.timedelta64(2, 'D'), Period('2011-04-03', freq='D'))
self.assertEqual(p + np.timedelta64(3600 * 24, 's'), Period('2011-04-02', freq='D'))
self.assertEqual(p + timedelta(-2), Period('2011-03-30', freq='D'))
self.assertEqual(p + timedelta(hours=48), Period('2011-04-03', freq='D'))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(4, 'h'), timedelta(hours=23)]:
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
p + o
p = Period('2011-04-01 09:00', freq='H')
self.assertEqual(p + offsets.Day(2), Period('2011-04-03 09:00', freq='H'))
self.assertEqual(p + offsets.Hour(3), Period('2011-04-01 12:00', freq='H'))
self.assertEqual(p + np.timedelta64(3, 'h'), Period('2011-04-01 12:00', freq='H'))
self.assertEqual(p + np.timedelta64(3600, 's'), Period('2011-04-01 10:00', freq='H'))
self.assertEqual(p + timedelta(minutes=120), Period('2011-04-01 11:00', freq='H'))
self.assertEqual(p + timedelta(days=4, minutes=180), Period('2011-04-05 12:00', freq='H'))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]:
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
p + o
def test_add_offset_nat(self):
# freq is DateOffset
p = Period('NaT', freq='A')
for o in [offsets.YearEnd(2)]:
self.assertEqual((p + o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p + o
p = Period('NaT', freq='M')
for o in [offsets.MonthEnd(2), offsets.MonthEnd(12)]:
self.assertEqual((p + o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
p + o
# freq is Tick
p = Period('NaT', freq='D')
for o in [offsets.Day(5), offsets.Hour(24), np.timedelta64(2, 'D'),
np.timedelta64(3600 * 24, 's'), timedelta(-2), timedelta(hours=48)]:
self.assertEqual((p + o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(4, 'h'), timedelta(hours=23)]:
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
p + o
p = Period('NaT', freq='H')
for o in [offsets.Day(2), offsets.Hour(3), np.timedelta64(3, 'h'),
np.timedelta64(3600, 's'), timedelta(minutes=120),
timedelta(days=4, minutes=180)]:
self.assertEqual((p + o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]:
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
p + o
def test_sub_offset(self):
# freq is DateOffset
p = Period('2011', freq='A')
self.assertEqual(p - offsets.YearEnd(2), Period('2009', freq='A'))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p - o
p = Period('2011-03', freq='M')
self.assertEqual(p - offsets.MonthEnd(2), Period('2011-01', freq='M'))
self.assertEqual(p - offsets.MonthEnd(12), Period('2010-03', freq='M'))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p - o
# freq is Tick
p = Period('2011-04-01', freq='D')
self.assertEqual(p - offsets.Day(5), Period('2011-03-27', freq='D'))
self.assertEqual(p - offsets.Hour(24), Period('2011-03-31', freq='D'))
self.assertEqual(p - np.timedelta64(2, 'D'), Period('2011-03-30', freq='D'))
self.assertEqual(p - np.timedelta64(3600 * 24, 's'), Period('2011-03-31', freq='D'))
self.assertEqual(p - timedelta(-2), Period('2011-04-03', freq='D'))
self.assertEqual(p - timedelta(hours=48), Period('2011-03-30', freq='D'))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(4, 'h'), timedelta(hours=23)]:
with tm.assertRaises(ValueError):
p - o
p = Period('2011-04-01 09:00', freq='H')
self.assertEqual(p - offsets.Day(2), Period('2011-03-30 09:00', freq='H'))
self.assertEqual(p - offsets.Hour(3), Period('2011-04-01 06:00', freq='H'))
self.assertEqual(p - np.timedelta64(3, 'h'), Period('2011-04-01 06:00', freq='H'))
self.assertEqual(p - np.timedelta64(3600, 's'), Period('2011-04-01 08:00', freq='H'))
self.assertEqual(p - timedelta(minutes=120), Period('2011-04-01 07:00', freq='H'))
self.assertEqual(p - timedelta(days=4, minutes=180), Period('2011-03-28 06:00', freq='H'))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]:
with tm.assertRaises(ValueError):
p - o
def test_sub_offset_nat(self):
# freq is DateOffset
p = Period('NaT', freq='A')
for o in [offsets.YearEnd(2)]:
self.assertEqual((p - o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p - o
p = Period('NaT', freq='M')
for o in [offsets.MonthEnd(2), offsets.MonthEnd(12)]:
self.assertEqual((p - o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p - o
# freq is Tick
p = Period('NaT', freq='D')
for o in [offsets.Day(5), offsets.Hour(24), np.timedelta64(2, 'D'),
np.timedelta64(3600 * 24, 's'), timedelta(-2), timedelta(hours=48)]:
self.assertEqual((p - o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(4, 'h'), timedelta(hours=23)]:
with tm.assertRaises(ValueError):
p - o
p = Period('NaT', freq='H')
for o in [offsets.Day(2), offsets.Hour(3), np.timedelta64(3, 'h'),
np.timedelta64(3600, 's'), timedelta(minutes=120),
timedelta(days=4, minutes=180)]:
self.assertEqual((p - o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]:
with tm.assertRaises(ValueError):
p - o
def test_nat_ops(self):
p = Period('NaT', freq='M')
self.assertEqual((p + 1).ordinal, tslib.iNaT)
self.assertEqual((p - 1).ordinal, tslib.iNaT)
self.assertEqual((p - Period('2011-01', freq='M')).ordinal, tslib.iNaT)
self.assertEqual((Period('2011-01', freq='M') - p).ordinal, tslib.iNaT)
def test_pi_ops_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M', name='idx')
result = idx + 2
expected = PeriodIndex(['2011-03', '2011-04', 'NaT', '2011-06'], freq='M', name='idx')
self.assertTrue(result.equals(expected))
result2 = result - 2
self.assertTrue(result2.equals(idx))
msg = "unsupported operand type\(s\)"
with tm.assertRaisesRegexp(TypeError, msg):
idx + "str"
class TestPeriodRepresentation(tm.TestCase):
"""
Wish to match NumPy units
"""
def test_annual(self):
self._check_freq('A', 1970)
def test_monthly(self):
self._check_freq('M', '1970-01')
def test_weekly(self):
self._check_freq('W-THU', '1970-01-01')
def test_daily(self):
self._check_freq('D', '1970-01-01')
def test_business_daily(self):
self._check_freq('B', '1970-01-01')
def test_hourly(self):
self._check_freq('H', '1970-01-01')
def test_minutely(self):
self._check_freq('T', '1970-01-01')
def test_secondly(self):
self._check_freq('S', '1970-01-01')
def test_millisecondly(self):
self._check_freq('L', '1970-01-01')
def test_microsecondly(self):
self._check_freq('U', '1970-01-01')
def test_nanosecondly(self):
self._check_freq('N', '1970-01-01')
def _check_freq(self, freq, base_date):
rng = PeriodIndex(start=base_date, periods=10, freq=freq)
exp = np.arange(10, dtype=np.int64)
self.assert_numpy_array_equal(rng.values, exp)
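    # An ordinal of -1 denotes the period immediately before the 1970 epoch,
    # e.g. Period(ordinal=-1, freq='A') falls in 1969, which is what the year
    # assertions below rely on.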
def test_negone_ordinals(self):
freqs = ['A', 'M', 'Q', 'D', 'H', 'T', 'S']
period = Period(ordinal=-1, freq='D')
for freq in freqs:
repr(period.asfreq(freq))
for freq in freqs:
period = Period(ordinal=-1, freq=freq)
repr(period)
self.assertEqual(period.year, 1969)
period = Period(ordinal=-1, freq='B')
repr(period)
period = Period(ordinal=-1, freq='W')
repr(period)
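# Period comparison semantics: equality and ordering are only defined between
# Periods of the same frequency; mixing frequencies raises ValueError, and
# ordering against a non-Period scalar raises TypeError.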
class TestComparisons(tm.TestCase):
def setUp(self):
self.january1 = Period('2000-01', 'M')
self.january2 = Period('2000-01', 'M')
self.february = Period('2000-02', 'M')
self.march = Period('2000-03', 'M')
self.day = Period('2012-01-01', 'D')
def test_equal(self):
self.assertEqual(self.january1, self.january2)
def test_equal_Raises_Value(self):
with tm.assertRaises(ValueError):
self.january1 == self.day
def test_notEqual(self):
self.assertNotEqual(self.january1, 1)
self.assertNotEqual(self.january1, self.february)
def test_greater(self):
self.assertTrue(self.february > self.january1)
def test_greater_Raises_Value(self):
with tm.assertRaises(ValueError):
self.january1 > self.day
def test_greater_Raises_Type(self):
with tm.assertRaises(TypeError):
self.january1 > 1
def test_greaterEqual(self):
self.assertTrue(self.january1 >= self.january2)
def test_greaterEqual_Raises_Value(self):
with tm.assertRaises(ValueError):
self.january1 >= self.day
with tm.assertRaises(TypeError):
print(self.january1 >= 1)
def test_smallerEqual(self):
self.assertTrue(self.january1 <= self.january2)
def test_smallerEqual_Raises_Value(self):
with tm.assertRaises(ValueError):
self.january1 <= self.day
def test_smallerEqual_Raises_Type(self):
with tm.assertRaises(TypeError):
self.january1 <= 1
def test_smaller(self):
self.assertTrue(self.january1 < self.february)
def test_smaller_Raises_Value(self):
with tm.assertRaises(ValueError):
self.january1 < self.day
def test_smaller_Raises_Type(self):
with tm.assertRaises(TypeError):
self.january1 < 1
def test_sort(self):
periods = [self.march, self.january1, self.february]
correctPeriods = [self.january1, self.february, self.march]
self.assertEqual(sorted(periods), correctPeriods)
def test_period_nat_comp(self):
p_nat = Period('NaT', freq='D')
p = Period('2011-01-01', freq='D')
nat = pd.Timestamp('NaT')
t = pd.Timestamp('2011-01-01')
        # confirm Period('NaT') works identically to Timestamp('NaT')
for left, right in [(p_nat, p), (p, p_nat), (p_nat, p_nat),
(nat, t), (t, nat), (nat, nat)]:
self.assertEqual(left < right, False)
self.assertEqual(left > right, False)
self.assertEqual(left == right, False)
self.assertEqual(left != right, True)
self.assertEqual(left <= right, False)
self.assertEqual(left >= right, False)
def test_pi_nat_comp(self):
idx1 = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-05'], freq='M')
result = idx1 > Period('2011-02', freq='M')
self.assert_numpy_array_equal(result, np.array([False, False, False, True]))
result = idx1 == Period('NaT', freq='M')
self.assert_numpy_array_equal(result, np.array([False, False, False, False]))
result = idx1 != Period('NaT', freq='M')
self.assert_numpy_array_equal(result, np.array([True, True, True, True]))
idx2 = PeriodIndex(['2011-02', '2011-01', '2011-04', 'NaT'], freq='M')
result = idx1 < idx2
self.assert_numpy_array_equal(result, np.array([True, False, False, False]))
result = idx1 == idx1
self.assert_numpy_array_equal(result, np.array([True, True, False, True]))
result = idx1 != idx1
self.assert_numpy_array_equal(result, np.array([False, False, True, False]))
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit |
kdebrab/pandas | pandas/tests/test_lib.py | 7 | 7887 | # -*- coding: utf-8 -*-
import pytest
import numpy as np
from pandas import Index
from pandas._libs import lib, writers as libwriters
import pandas.util.testing as tm
class TestMisc(object):
def test_max_len_string_array(self):
arr = a = np.array(['foo', 'b', np.nan], dtype='object')
assert libwriters.max_len_string_array(arr) == 3
# unicode
arr = a.astype('U').astype(object)
assert libwriters.max_len_string_array(arr) == 3
# bytes for python3
arr = a.astype('S').astype(object)
assert libwriters.max_len_string_array(arr) == 3
# raises
pytest.raises(TypeError,
lambda: libwriters.max_len_string_array(arr.astype('U')))
def test_fast_unique_multiple_list_gen_sort(self):
keys = [['p', 'a'], ['n', 'd'], ['a', 's']]
gen = (key for key in keys)
expected = np.array(['a', 'd', 'n', 'p', 's'])
out = lib.fast_unique_multiple_list_gen(gen, sort=True)
tm.assert_numpy_array_equal(np.array(out), expected)
gen = (key for key in keys)
expected = np.array(['p', 'a', 'n', 'd', 's'])
out = lib.fast_unique_multiple_list_gen(gen, sort=False)
tm.assert_numpy_array_equal(np.array(out), expected)
class TestIndexing(object):
def test_maybe_indices_to_slice_left_edge(self):
target = np.arange(100)
# slice
indices = np.array([], dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
for end in [1, 2, 5, 20, 99]:
for step in [1, 2, 4]:
indices = np.arange(0, end, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# not slice
for case in [[2, 1, 2, 0], [2, 2, 1, 0], [0, 1, 2, 1], [-2, 0, 2],
[2, 0, -2]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_indices_to_slice_right_edge(self):
target = np.arange(100)
# slice
for start in [0, 2, 5, 20, 97, 98]:
for step in [1, 2, 4]:
indices = np.arange(start, 99, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# not slice
indices = np.array([97, 98, 99, 100], dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
with pytest.raises(IndexError):
target[indices]
with pytest.raises(IndexError):
target[maybe_slice]
indices = np.array([100, 99, 98, 97], dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
with pytest.raises(IndexError):
target[indices]
with pytest.raises(IndexError):
target[maybe_slice]
for case in [[99, 97, 99, 96], [99, 99, 98, 97], [98, 98, 97, 96]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_indices_to_slice_both_edges(self):
target = np.arange(10)
# slice
for step in [1, 2, 4, 5, 8, 9]:
indices = np.arange(0, 9, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
# not slice
for case in [[4, 2, 0, -2], [2, 2, 1, 0], [0, 1, 2, 1]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_indices_to_slice_middle(self):
target = np.arange(100)
# slice
for start, end in [(2, 10), (5, 25), (65, 97)]:
for step in [1, 2, 4, 20]:
indices = np.arange(start, end, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# not slice
for case in [[14, 12, 10, 12], [12, 12, 11, 10], [10, 11, 12, 11]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_booleans_to_slice(self):
arr = np.array([0, 0, 1, 1, 1, 0, 1], dtype=np.uint8)
result = lib.maybe_booleans_to_slice(arr)
assert result.dtype == np.bool_
result = lib.maybe_booleans_to_slice(arr[:0])
assert result == slice(0, 0)
def test_get_reverse_indexer(self):
indexer = np.array([-1, -1, 1, 2, 0, -1, 3, 4], dtype=np.int64)
result = lib.get_reverse_indexer(indexer, 5)
expected = np.array([4, 2, 3, 6, 7], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
def test_cache_readonly_preserve_docstrings():
# GH18197
assert Index.hasnans.__doc__ is not None
| bsd-3-clause |
shyamalschandra/scikit-learn | benchmarks/bench_sample_without_replacement.py | 397 | 8008 | """
Benchmarks for sampling without replacement of integers.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_sample(sampling, n_population, n_samples):
gc.collect()
# start time
t_start = datetime.now()
sampling(n_population, n_samples)
delta = (datetime.now() - t_start)
# stop time
time = compute_time(t_start, delta)
return time
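# Illustrative usage sketch (not part of the original benchmark script): time a
# single sampling call with ``bench_sample``. The lambda below is an assumed
# stand-in for any sampler exposing the signature (n_population, n_samples).
def _example_bench_single_call():
    # Draw 1000 integers out of 10**6 without replacement by taking the head
    # of a full NumPy permutation, and report how long the call took.
    sampler = lambda n_population, n_samples: \
        np.random.permutation(n_population)[:n_samples]
    elapsed = bench_sample(sampler, 10 ** 6, 10 ** 3)
    print("single sampling call took %.6f s" % elapsed)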
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-population",
dest="n_population", default=100000, type=int,
help="Size of the population to sample from.")
op.add_option("--n-step",
dest="n_steps", default=5, type=int,
help="Number of step interval between 0 and n_population.")
default_algorithms = "custom-tracking-selection,custom-auto," \
"custom-reservoir-sampling,custom-pool,"\
"python-core-sample,numpy-permutation"
op.add_option("--algorithm",
dest="selected_algorithm",
default=default_algorithms,
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. \nAvailable: %default")
# op.add_option("--random-seed",
# dest="random_seed", default=13, type=int,
# help="Seed used by the random number generators.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
selected_algorithm = opts.selected_algorithm.split(',')
for key in selected_algorithm:
if key not in default_algorithms.split(','):
raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
% (key, default_algorithms))
###########################################################################
# List sampling algorithm
###########################################################################
# We assume that sampling algorithm has the following signature:
# sample(n_population, n_sample)
#
sampling_algorithm = {}
###########################################################################
# Set Python core input
sampling_algorithm["python-core-sample"] = \
lambda n_population, n_sample: \
random.sample(xrange(n_population), n_sample)
###########################################################################
# Set custom automatic method selection
sampling_algorithm["custom-auto"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="auto",
random_state=random_state)
###########################################################################
# Set custom tracking based method
sampling_algorithm["custom-tracking-selection"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="tracking_selection",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-reservoir-sampling"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="reservoir_sampling",
random_state=random_state)
###########################################################################
    # Set custom pool based method
sampling_algorithm["custom-pool"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="pool",
random_state=random_state)
###########################################################################
# Numpy permutation based
sampling_algorithm["numpy-permutation"] = \
lambda n_population, n_sample: \
np.random.permutation(n_population)[:n_sample]
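    # Note (illustration only): every entry of ``sampling_algorithm`` is called
    # the same way, e.g. sampling_algorithm["numpy-permutation"](10, 3) returns
    # 3 distinct integers drawn from range(10); the custom sklearn methods also
    # accept an optional ``random_state`` for reproducible draws.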
###########################################################################
# Remove unspecified algorithm
sampling_algorithm = dict((key, value)
for key, value in sampling_algorithm.items()
if key in selected_algorithm)
###########################################################################
# Perform benchmark
###########################################################################
time = {}
n_samples = np.linspace(start=0, stop=opts.n_population,
num=opts.n_steps).astype(np.int)
ratio = n_samples / opts.n_population
print('Benchmarks')
print("===========================")
for name in sorted(sampling_algorithm):
print("Perform benchmarks for %s..." % name, end="")
time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
for step in xrange(opts.n_steps):
for it in xrange(opts.n_times):
time[name][step, it] = bench_sample(sampling_algorithm[name],
opts.n_population,
n_samples[step])
print("done")
print("Averaging results...", end="")
for name in sampling_algorithm:
time[name] = np.mean(time[name], axis=1)
print("done\n")
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Sampling algorithm performance:")
print("===============================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
plt.title("n_population = %s, n_times = %s" %
(opts.n_population, opts.n_times))
ax = fig.add_subplot(111)
for name in sampling_algorithm:
ax.plot(ratio, time[name], label=name)
ax.set_xlabel('ratio of n_sample / n_population')
ax.set_ylabel('Time (s)')
ax.legend()
# Sort legend labels
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=0)
plt.show()
| bsd-3-clause |
jobovy/apogee-maps | py/plot_ah_location.py | 1 | 8658 | ###############################################################################
# plot_ah_location: plot the range of extinctions effor a given location
###############################################################################
import os, os.path
import sys
import pickle
import numpy
import matplotlib
matplotlib.use('Agg')
from galpy.util import save_pickles, bovy_plot
from matplotlib import rc, pyplot
import mwdust
import apogee.select.apogeeSelect
from define_rcsample import get_rcsample
_PLOTDIST= True
_LW= 1.5
def plot_ah_location(location,plotname):
# Setup selection function
selectFile= '../savs/selfunc-nospdata.sav'
if os.path.exists(selectFile):
with open(selectFile,'rb') as savefile:
apo= pickle.load(savefile)
else:
# Setup selection function
apo= apogee.select.apogeeSelect()
# Delete these because they're big and we don't need them
del apo._specdata
del apo._photdata
save_pickles(selectFile,apo)
glon, glat= apo.glonGlat(location)
glon= glon[0]
glat= glat[0]
ahFile= '../savs/ah-%i.sav' % location
if not os.path.exists(ahFile):
# Distances at which to calculate the extinction
distmods= numpy.linspace(7.,15.5,301)
ds= 10.**(distmods/5-2.)
# Setup Green et al. (2015) dust map
gd= mwdust.Green15(filter='2MASS H')
pa, ah= gd.dust_vals_disk(glon,glat,ds,apo.radius(location))
meanah_default= numpy.sum(numpy.tile(pa,(len(ds),1)).T*ah,axis=0)/numpy.sum(pa)
stdah_default= numpy.sqrt(numpy.sum(numpy.tile(pa,(len(ds),1)).T\
*ah**2.,axis=0)\
/numpy.sum(pa)-meanah_default**2.)
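        # Illustration (comment only, assuming the usual mwdust convention that
        # ``pa`` holds per-pixel weights and ``ah`` the extinction per pixel and
        # distance): the lines above form the weight-averaged A_H and its spread
        # at each distance. E.g. with pa = [0.75, 0.25] and extinctions
        # [0.2, 0.6] at one distance, the weighted mean is
        # 0.75*0.2 + 0.25*0.6 = 0.3.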
# Marshall et al. (2006)
marshall= mwdust.Marshall06(filter='2MASS H')
try:
pa, ah= marshall.dust_vals_disk(glon,glat,ds,apo.radius(location))
except IndexError:
meanah_marshall= -numpy.ones_like(ds)
stdah_marshall= -numpy.ones_like(ds)
else:
meanah_marshall= numpy.sum(numpy.tile(pa,(len(ds),1)).T*ah,
axis=0)/numpy.sum(pa)
stdah_marshall= numpy.sqrt(numpy.sum(numpy.tile(pa,(len(ds),1)).T\
*ah**2.,axis=0)\
/numpy.sum(pa)-meanah_marshall**2.)
if True:
# Drimmel et al. (2003)
drimmel= mwdust.Drimmel03(filter='2MASS H')
pa, ah= drimmel.dust_vals_disk(glon,glat,ds,apo.radius(location))
meanah_drimmel= numpy.sum(numpy.tile(pa,(len(ds),1)).T*ah,axis=0)/numpy.sum(pa)
stdah_drimmel= numpy.sqrt(numpy.sum(numpy.tile(pa,(len(ds),1)).T\
*ah**2.,axis=0)\
/numpy.sum(pa)-meanah_drimmel**2.)
else:
meanah_drimmel= -numpy.ones_like(ds)
stdah_drimmel= -numpy.ones_like(ds)
if True:
# Sale et al. (2014)
sale= mwdust.Sale14(filter='2MASS H')
try:
pa, ah= sale.dust_vals_disk(glon,glat,ds,apo.radius(location))
meanah_sale= numpy.sum(numpy.tile(pa,(len(ds),1)).T*ah,
axis=0)/numpy.sum(pa)
except (TypeError,ValueError):
meanah_sale= -numpy.ones_like(ds)
stdah_sale= -numpy.ones_like(ds)
else:
stdah_sale= numpy.sqrt(numpy.sum(numpy.tile(pa,(len(ds),1)).T\
*ah**2.,axis=0)\
/numpy.sum(pa)-meanah_sale**2.)
else:
meanah_sale= -numpy.ones_like(ds)
stdah_sale= -numpy.ones_like(ds)
save_pickles(ahFile,distmods,meanah_default,stdah_default,
meanah_marshall,stdah_marshall,
meanah_drimmel,stdah_drimmel,
meanah_sale,stdah_sale)
else:
with open(ahFile,'rb') as savefile:
distmods= pickle.load(savefile)
meanah_default= pickle.load(savefile)
stdah_default= pickle.load(savefile)
meanah_marshall= pickle.load(savefile)
stdah_marshall= pickle.load(savefile)
meanah_drimmel= pickle.load(savefile)
stdah_drimmel= pickle.load(savefile)
meanah_sale= pickle.load(savefile)
stdah_sale= pickle.load(savefile)
# Now plot
bovy_plot.bovy_print(fig_height=3.)
if _PLOTDIST:
distmods= 10.**(distmods/5-2.)
xrange= [0.,12.]
xlabel=r'$D\,(\mathrm{kpc})$'
else:
        xrange= [7.,15.8]
xlabel=r'$\mathrm{distance\ modulus}\ \mu$'
ylabel=r'$A_H$'
yrange= [0.,1.2*numpy.amax(numpy.vstack((meanah_default+stdah_default,
meanah_marshall+stdah_marshall,
meanah_drimmel+stdah_drimmel,
meanah_sale+stdah_sale)))]
line_default= bovy_plot.bovy_plot(distmods,meanah_default,
'b-',lw=_LW,zorder=12,
xrange=xrange,
xlabel=xlabel,
yrange=yrange,
ylabel=ylabel)
pyplot.fill_between(distmods,
meanah_default-stdah_default,
meanah_default+stdah_default,
hatch='/',facecolor=(0,0,0,0),
color='b',lw=0.25,zorder=4)
line_marshall= bovy_plot.bovy_plot(distmods,meanah_marshall,'r-',lw=_LW,
overplot=True,
zorder=8)
pyplot.fill_between(distmods,
meanah_marshall-stdah_marshall,
meanah_marshall+stdah_marshall,
hatch='\\',facecolor=(0,0,0,0),
color='r',lw=0.25,zorder=2)
line_drimmel= bovy_plot.bovy_plot(distmods,meanah_drimmel,'-',lw=_LW,
color='gold',
overplot=True,
zorder=7)
pyplot.fill_between(distmods,
meanah_drimmel-stdah_drimmel,
meanah_drimmel+stdah_drimmel,
hatch='///',facecolor=(0,0,0,0),
color='gold',lw=0.25,zorder=1)
line_sale= bovy_plot.bovy_plot(distmods,meanah_sale,'-',lw=_LW,
color='c',
overplot=True,
zorder=9)
pyplot.fill_between(distmods,
meanah_sale-stdah_sale,
meanah_sale+stdah_sale,
hatch='//',facecolor=(0,0,0,0),
color='c',lw=0.25,zorder=3)
if True:
data= get_rcsample()
data= data[data['LOCATION_ID'] == location]
bovy_plot.bovy_plot(data['RC_DIST'],data['AK_TARG']*1.55,
'ko',zorder=20,overplot=True,ms=2.)
if location == 4318:
pyplot.legend((line_default[0],line_sale[0]),
(r'$\mathrm{Green\ et\ al.\ (2015)}$',
r'$\mathrm{Sale\ et\ al.\ (2014)}$'),
loc='lower right',#bbox_to_anchor=(.91,.375),
numpoints=8,
prop={'size':14},
frameon=False)
elif location == 4242:
pyplot.legend((line_marshall[0],line_drimmel[0]),
(r'$\mathrm{Marshall\ et\ al.\ (2006)}$',
r'$\mathrm{Drimmel\ et\ al.\ (2003)}$'),
loc='lower right',#bbox_to_anchor=(.91,.375),
numpoints=8,
prop={'size':14},
frameon=False)
# Label
lcen, bcen= apo.glonGlat(location)
if numpy.fabs(bcen) < 0.1: bcen= 0.
bovy_plot.bovy_text(r'$(l,b) = (%.1f,%.1f)$' % (lcen,bcen),
top_right=True,size=16.)
bovy_plot.bovy_end_print(plotname,dpi=300,
bbox_extra_artists=pyplot.gca().get_children(),
bbox_inches='tight')
return None
if __name__ == '__main__':
#4240 is 30,0
plot_ah_location(int(sys.argv[1]),sys.argv[2])
| bsd-3-clause |
mm22dl/MeinKPS | idc.py | 1 | 13515 | #! /usr/bin/python
# -*- coding: utf-8 -*-
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Title: idc
Author: David Leclerc
Version: 0.1
Date: 30.06.2017
License: GNU General Public License, Version 3
(http://www.gnu.org/licenses/gpl.html)
Overview: ...
Notes: ...
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
# LIBRARIES
import numpy as np
import matplotlib.pyplot as plt
# USER LIBRARIES
import lib
class IDC(object):
def __init__(self, DIA, PIA = None):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
INIT
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Warning: all IDC curves take NEGATIVE time input! For example, if
it has been 2 hours since a bolus has been given, then the
corresponding time of said bolus is given by t = -2.
"""
# Define DIA
self.DIA = float(DIA)
# Define PIA
self.PIA = PIA if PIA is None else float(PIA)
# Define plot limits
self.xlim = [-self.DIA, 0]
self.ylim = [0, 1]
def f(self, t):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
F
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Gives fraction of active insulin remaining in body t hours after
enacting it.
Note: -DIA <= t <= 0
"""
raise NotImplementedError
def F(self, t):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
F
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Implicit integration of IDC.
Note: Only makes sense when taking difference between two values of
the integral (e.g. dF = F2 - F1)
"""
raise NotImplementedError
def correct(self, t):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
CORRECT
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Bring back given time within insulin's range of action.
"""
# If too old: bring it back up
if t < -self.DIA:
t = -self.DIA
        # If too new: not allowed, insulin cannot have been given in the future
elif t > 0:
raise ValueError("Given insulin age is too new.")
# Return corrected time
return t
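    # Illustration (comment sketch, assuming DIA = 3 h): correct(-5) clips the
    # insulin age back to -3, correct(-1.5) is returned unchanged, and
    # correct(+1) raises ValueError because the insulin cannot act before it
    # was given.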
def plot(self, show = True, color = "black", n = 1, size = [1, 1]):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PLOT
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
# Define subplot
ax = plt.subplot(size[0], size[1], n)
# Define title
title = "IDCs"
# Define axis labels
x = "(h)"
y = "(-)"
# Define subplot label
label = self.__class__.__name__
# Subplots
if size[0] > 1 or size[1] > 1:
# Title of each subplot corresponds to its label
title = label
# Set title
ax.set_title(title, fontweight = "semibold")
# Set axis labels
ax.set_xlabel(x)
ax.set_ylabel(y)
# Set axis limits
ax.set_xlim(self.xlim)
ax.set_ylim(self.ylim)
# Compute axes
t = np.linspace(-self.DIA, 0, 100)
y = np.vectorize(self.f)(t)
# Add data to plot
ax.plot(t, y, lw = 2, ls = "-", label = label, c = color)
# Single plot: show legend
if size == [1, 1]:
ax.legend()
# Ready to show?
if show:
plt.show()
class FourthOrderIDC(IDC):
def __init__(self, DIA):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
INIT
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Modelization of IDC as a 4th-order polynomial.
"""
# Start initialization
super(FourthOrderIDC, self).__init__(DIA)
# Initialize 4th-order parameters
self.m0 = None
self.m1 = None
self.m2 = None
self.m3 = None
self.m4 = None
def f(self, t):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
F
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
# Correct time
t = self.correct(t)
# Compute f(t) of IDC
f = (self.m4 * t ** 4 +
self.m3 * t ** 3 +
self.m2 * t ** 2 +
self.m1 * t ** 1 +
self.m0)
# Return it
return f
def F(self, t):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
F
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
# Correct time
t = self.correct(t)
# Compute F(t) of IDC
F = (self.m4 * t ** 5 / 5 +
self.m3 * t ** 4 / 4 +
self.m2 * t ** 3 / 3 +
self.m1 * t ** 2 / 2 +
self.m0 * t ** 1 / 1)
# Return it
return F
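# Note (illustration, not part of the original module): since F is an
# antiderivative of f, the area under the remaining-activity curve between two
# past times t1 < t2 <= 0 is F(t2) - F(t1). For example, with the WalshIDC(4)
# parameters defined further below, F(-1) - F(-2) approximates the integral of
# f over the hour from 2 h to 1 h before now.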
class ExponentialIDC(IDC):
def __init__(self, DIA, PIA):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
INIT
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Modelization of IDC based on an exponential model.
"""
# Start initialization
super(ExponentialIDC, self).__init__(DIA, PIA)
# Time constant of exponential decay
self.tau = self.PIA * ((1 - self.PIA / self.DIA) /
(1 - 2 * self.PIA / self.DIA))
# Rise time factor
self.a = 2 * self.tau / self.DIA
# Auxiliary scale factor
self.s = 1 / (1 - self.a + (1 + self.a) * np.exp(-self.DIA / self.tau))
def f(self, t):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
F
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
# Correct time
t = self.correct(t)
# Compute IDC(t)
f = 1 - self.s * (1 - self.a) * ((t ** 2 /
(self.tau * self.DIA * (1 - self.a)) +
t / self.tau - 1) * np.exp(t / self.tau) + 1)
# Return it
return f
def F(self, t):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
F
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
# Correct time
t = self.correct(t)
# Compute F(t) of IDC
F = -(self.s * np.exp(t/self.tau) * (t *
(-self.a * self.DIA - 2 * self.tau + self.DIA) + 2 * self.tau *
((self.a - 1) * self.DIA + self.tau) +
t ** 2)) / self.DIA + (self.a - 1) * self.s * t + t
# Return it
return F
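# Minimal sanity-check sketch (added illustration, assuming only the class
# above): the scale factor ``s`` is chosen so that the exponential curve starts
# at 1 and decays to 0 over the duration of insulin action.
def _check_exponential_idc_endpoints(DIA=6.0, PIA=1.5):
    # f(0) should be 1 (all insulin still active at injection time) and
    # f(-DIA) should be 0 (no activity left once the DIA has elapsed).
    idc = ExponentialIDC(DIA, PIA)
    return idc.f(0), idc.f(-DIA)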
class TriangleModelIDC(IDC):
def __init__(self, DIA, PIA):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
INIT
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Modelization of IDC based on a triangle IAC. The IAC is given by the
following formula (with negative times since injection):
IAC(t) = m_0 * t + b_0 for t = [-DIA, -PIA]
m_1 * t + b_1 for t = [-PIA, 0]
where the units of IAC are given by [/h].
We assume that the IDC is given by the integral of the IAC:
IDC(t) = S IAC(t) * dt
= m_0 * t ** 2 / 2 + b_0 * t + c_0 for t = [-DIA, -PIA]
m_1 * t ** 2 / 2 + b_1 * t + c_1 for t = [-PIA, 0]
where S represents an integral on time t.
"""
# Start initialization
super(TriangleModelIDC, self).__init__(DIA)
# Define PIA
self.PIA = float(PIA)
# Compute value of IAC at peak of action [y0 = IAC(PIA)] using
# normalization: S IAC(t) * dt = 1
self.y0 = 2 / self.DIA
# Define coefficients for t = [-DIA, -PIA]
self.m0 = self.y0 / (self.DIA - self.PIA)
self.b0 = self.m0 * self.DIA
self.c0 = self.DIA * (self.b0 - self.m0 * self.DIA / 2)
# Define coefficients for t = [-PIA, 0]
self.m1 = -self.y0 / self.PIA
self.b1 = 0
self.c1 = 1
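        # Worked example (comment only, assuming DIA = 6 and PIA = 1): the
        # definitions above give y0 = 1/3, m0 = 1/15, b0 = 0.4, c0 = 1.2,
        # m1 = -1/3, b1 = 0 and c1 = 1, so the two parabolic branches of the
        # IDC meet continuously at t = -1 with value 5/6, while f(-6) = 0 and
        # f(0) = 1 as expected.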
def f(self, t):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
F
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
# Correct time
t = self.correct(t)
# From -DIA to PIA
if -self.DIA <= t <= -self.PIA:
# Link coefficients
m = self.m0
b = self.b0
c = self.c0
# From PIA to 0
elif -self.PIA < t <= 0:
# Link coefficients
m = self.m1
b = self.b1
c = self.c1
# Compute IDC(t)
f = m * t ** 2 / 2 + b * t + c
# Return it
return f
def F(self, t):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
F
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The integration of the piecewise IDC is based on the following rule:
S_a^b f(t) * dt = S_u^b f(t) * dt - S_u^a f(t) * dt
where S_a^b represents the integral on time of f(t) from a to b.
"""
# Define integral
def I(t, m, b, c):
return m * t ** 3 / 6 + b * t ** 2 / 2 + c * t
# Correct time
t = self.correct(t)
# Initialize result
F = 0
# From -DIA to PIA
if -self.DIA <= t <= -self.PIA:
# Define reference point
T = -self.DIA
# Link coefficients
m = self.m0
b = self.b0
c = self.c0
# From PIA to 0
elif -self.PIA < t <= 0:
# Define reference point
T = -self.PIA
# Link coefficients
m = self.m1
b = self.b1
c = self.c1
# Add first part of integral
F += self.F(T)
# Compute it
F += I(t, m, b, c) - I(T, m, b, c)
# Return it
return F
class WalshIDC(FourthOrderIDC):
def __init__(self, DIA):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
INIT
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
# Start initialization
super(WalshIDC, self).__init__(DIA)
# Define parameters of IDC for various DIA
if DIA == 3:
self.m4 = -4.151e-2
self.m3 = -2.925e-1
self.m2 = -6.332e-1
self.m1 = -5.553e-2
self.m0 = 9.995e-1
elif DIA == 4:
self.m4 = -4.290e-3
self.m3 = -5.465e-2
self.m2 = -1.984e-1
self.m1 = 5.452e-2
self.m0 = 9.995e-1
elif DIA == 5:
self.m4 = -3.823e-3
self.m3 = -5.011e-2
self.m2 = -1.998e-1
self.m1 = -2.694e-2
self.m0 = 9.930e-1
elif DIA == 6:
self.m4 = -1.935e-3
self.m3 = -3.052e-2
self.m2 = -1.474e-1
self.m1 = -3.819e-2
self.m0 = 9.970e-1
# Bad DIA
else:
raise ValueError("Bad DIA: " + str(DIA))
class FiaspIDC(TriangleModelIDC):
def __init__(self, DIA):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
INIT
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Set peak of insulin action at a 6th of the DIA by default. For
example, if the insulin action lasts 6 hours, then the peak of
action would be presumed to be at 1 hour after injection.
"""
# Start initialization
super(FiaspIDC, self).__init__(DIA, DIA / 6.0)
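        # Example (illustration only): FiaspIDC(6.0) places the peak of action
        # at PIA = 1 h, i.e. the underlying triangular IAC climbs from zero at
        # t = -6 h to its peak at t = -1 h and drops back to zero at t = 0.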
def main():
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
MAIN
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
# Define DIAs
DIAs = [5.0]
# Initialize plot
lib.initPlot()
# Plot IDCs with various DIAs
for DIA in DIAs:
        # Instantiate IDCs
NovoRapid = WalshIDC(DIA)
Fiasp = FiaspIDC(DIA)
# Add them to plot
NovoRapid.plot(False, "orange")
Fiasp.plot(False, "#99e500")
    # Instantiate an exponential IDC
ExponentialNovo = ExponentialIDC(6.0, 1.5)
# Add it to plot
ExponentialNovo.plot(False, "blue")
# Show plot
plt.show()
# Run this when script is called from terminal
if __name__ == "__main__":
main() | gpl-3.0 |
Vimos/scikit-learn | sklearn/metrics/classification.py | 4 | 72788 | """Metrics to assess performance on classification task given class prediction
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Jatin Shah <[email protected]>
# Saurabh Jha <[email protected]>
# Bernardo Stein <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import assert_all_finite
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from ..exceptions import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = set([type_true, type_pred])
if y_type == set(["binary", "multiclass"]):
y_type = set(["multiclass"])
if len(y_type) > 1:
raise ValueError("Can't handle mix of {0} and {1}"
"".format(type_true, type_pred))
    # We can't have more than one value for y_type => the set is no longer needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type == "binary":
unique_values = np.union1d(y_true, y_pred)
if len(unique_values) > 2:
y_type = "multiclass"
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
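# Illustrative note (comment only, not part of the scikit-learn source):
# _check_targets([0, 1, 1], [1, 1, 0]) yields y_type == 'binary', mixing
# [0, 1, 2] with [0, 1, 1] is promoted to 'multiclass', and mixing a 1d label
# vector with a multilabel indicator matrix raises ValueError.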
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
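# Illustration (comment only): for sample_score = [1, 0, 1] and
# sample_weight = [0.5, 0.25, 0.25], _weighted_sum returns the weighted mean
# 0.75 when normalize=True, the weighted sum 0.75 when normalize=False, and
# the plain count 2 when no sample_weight is given and normalize is False.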
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the correctly classified samples
(float), else it returns the number of correctly classified samples
(int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_similarity_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_similarity_score`` function.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None, sample_weight=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Thus in binary classification, the count of true negatives is
:math:`C_{0,0}`, false negatives is :math:`C_{1,0}`, true positives is
:math:`C_{1,1}` and false positives is :math:`C_{0,1}`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
C : array, shape = [n_classes, n_classes]
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<https://en.wikipedia.org/wiki/Confusion_matrix>`_
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
In the binary case, we can extract true positives, etc as follows:
>>> tn, fp, fn, tp = confusion_matrix([0, 1, 0, 1], [1, 1, 1, 0]).ravel()
>>> (tn, fp, fn, tp)
(0, 2, 1, 1)
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
if np.all([l not in y_true for l in labels]):
raise ValueError("At least one label specified must be in y_true")
if sample_weight is None:
sample_weight = np.ones(y_true.shape[0], dtype=np.int)
else:
sample_weight = np.asarray(sample_weight)
check_consistent_length(sample_weight, y_true, y_pred)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
# also eliminate weights of eliminated items
sample_weight = sample_weight[ind]
CM = coo_matrix((sample_weight, (y_true, y_pred)),
shape=(n_labels, n_labels)
).toarray()
return CM
def cohen_kappa_score(y1, y2, labels=None, weights=None, sample_weight=None):
"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1]_, a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2]_.
Read more in the :ref:`User Guide <cohen_kappa>`.
Parameters
----------
y1 : array, shape = [n_samples]
Labels assigned by the first annotator.
y2 : array, shape = [n_samples]
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
weights : str, optional
List of weighting type to calculate the score. None means no weighted;
"linear" means linear weighted; "quadratic" means quadratic weighted.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] `R. Artstein and M. Poesio (2008). "Inter-coder agreement for
computational linguistics". Computational Linguistics 34(4):555-596.
<http://www.mitpressjournals.org/doi/abs/10.1162/coli.07-034-R2#.V0J1MJMrIWo>`_
.. [3] `Wikipedia entry for the Cohen's kappa.
<https://en.wikipedia.org/wiki/Cohen%27s_kappa>`_
"""
confusion = confusion_matrix(y1, y2, labels=labels,
sample_weight=sample_weight)
n_classes = confusion.shape[0]
sum0 = np.sum(confusion, axis=0)
sum1 = np.sum(confusion, axis=1)
expected = np.outer(sum0, sum1) / np.sum(sum0)
if weights is None:
w_mat = np.ones([n_classes, n_classes], dtype=np.int)
w_mat.flat[:: n_classes + 1] = 0
elif weights == "linear" or weights == "quadratic":
w_mat = np.zeros([n_classes, n_classes], dtype=np.int)
w_mat += np.arange(n_classes)
if weights == "linear":
w_mat = np.abs(w_mat - w_mat.T)
else:
w_mat = (w_mat - w_mat.T) ** 2
else:
raise ValueError("Unknown kappa weighting type.")
k = np.sum(w_mat * confusion) / np.sum(w_mat * expected)
return 1 - k
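# Worked example (comment sketch, not part of the library source): for
# y1 = [0, 0, 1, 1] and y2 = [0, 0, 1, 0] the observed agreement is p_o = 3/4
# and the chance agreement is p_e = (2/4)*(3/4) + (2/4)*(1/4) = 1/2, so
# cohen_kappa_score(y1, y2) == (0.75 - 0.5) / (1 - 0.5) == 0.5.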
def jaccard_similarity_score(y_true, y_pred, normalize=True,
sample_weight=None):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
sets, is used to compare set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the sum of the Jaccard similarity coefficient
over the sample set. Otherwise, return the average of Jaccard
similarity coefficient.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<https://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_similarity_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> jaccard_similarity_score(y_true, y_pred)
0.5
>>> jaccard_similarity_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
np.ones((2, 2)))
0.75
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide" error here
pred_or_true = count_nonzero(y_true + y_pred, axis=1)
pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
score = pred_and_true / pred_or_true
score[pred_or_true == 0.0] = 1.0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def matthews_corrcoef(y_true, y_pred, sample_weight=None):
"""Compute the Matthews correlation coefficient (MCC) for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes into
account true and false positives and negatives and is generally regarded as
a balanced measure which can be used even if the classes are of very
different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
sample_weight : array-like of shape = [n_samples], default None
Sample weights.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
        prediction, 0 an average random prediction and -1 an inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<https://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type != "binary":
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
mean_yt = np.average(y_true, weights=sample_weight)
mean_yp = np.average(y_pred, weights=sample_weight)
y_true_u_cent = y_true - mean_yt
y_pred_u_cent = y_pred - mean_yp
cov_ytyp = np.average(y_true_u_cent * y_pred_u_cent, weights=sample_weight)
var_yt = np.average(y_true_u_cent ** 2, weights=sample_weight)
var_yp = np.average(y_pred_u_cent ** 2, weights=sample_weight)
mcc = cov_ytyp / np.sqrt(var_yt * var_yp)
if np.isnan(mcc):
return 0.
else:
return mcc
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_similarity_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the weighted average of
the F1 score of each class.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
References
----------
.. [1] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([ 0.8, 0. , 0. ])
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float
Weight of precision in harmonic mean.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
... # doctest: +ELLIPSIS
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
... # doctest: +ELLIPSIS
array([ 0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight)
return f
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
    recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
support : int (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<https://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
<http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_
Examples
--------
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
... # doctest: +ELLIPSIS
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
(array([ 0. , 0. , 0.66...]),
array([ 0., 0., 1.]),
array([ 0. , 0. , 0.8]),
array([2, 2, 2]))
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary':
if y_type == 'binary':
if pos_label not in present_labels:
if len(present_labels) < 2:
# Only negative labels
return (0., 0., 0., 0)
else:
raise ValueError("pos_label=%r is not a valid label: %r" %
(pos_label, present_labels))
labels = [pos_label]
else:
raise ValueError("Target is %s but average='binary'. Please "
"choose another average setting." % y_type)
elif pos_label not in (None, 1):
warnings.warn("Note that pos_label (set to %r) is ignored when "
"average != 'binary' (got %r). You may use "
"labels=[pos_label] to specify a single positive class."
% (pos_label, average), UserWarning)
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
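        # ``labels`` now holds the requested labels first, followed by any
        # remaining labels seen in the data (so the encoding below covers
        # every observed label); only the first n_labels entries are reported.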
# Calculate tp_sum, pred_sum, true_sum ###
if y_type.startswith('multilabel'):
sum_axis = 1 if average == 'samples' else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.all(labels == present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels). '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels). '
'Got %d < 0' % np.min(labels))
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
elif average == 'samples':
raise ValueError("Sample-based precision, recall, fscore is "
"not meaningful outside multilabel "
"classification. See the accuracy_score instead.")
else:
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
# Finally, we have all our sufficient statistics. Divide! #
beta2 = beta ** 2
with np.errstate(divide='ignore', invalid='ignore'):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
# Don't need to warn for F: either P or R warned, or tp == 0 where pos
# and true are nonzero, in which case, F is well-defined and zero
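        # F_beta = (1 + beta**2) * precision * recall /
        #          (beta**2 * precision + recall)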
f_score = ((1 + beta2) * precision * recall /
(beta2 * precision + recall))
f_score[tp_sum == 0] = 0.0
# Average the results
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
Parameter *labels* improved for the multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS
array([ 0.66..., 0. , 0. ])
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight)
return p
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
Parameter *labels* improved for the multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([ 1., 0., 0.])
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight)
return r
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
digits : int
Number of digits for formatting output floating point values
Returns
-------
report : string
Text summary of the precision, recall, F1 score for each class.
The reported averages are a prevalence-weighted macro-average across
classes (equivalent to :func:`precision_recall_fscore_support` with
``average='weighted'``).
Note that in binary classification, recall of the positive class
is also known as "sensitivity"; recall of the negative class is
"specificity".
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
avg / total 0.70 0.60 0.61 5
<BLANKLINE>
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
if target_names is not None and len(labels) != len(target_names):
warnings.warn(
"labels size, {0}, does not match size of target_names, {1}"
.format(len(labels), len(target_names))
)
last_line_heading = 'avg / total'
if target_names is None:
target_names = [u'%s' % l for l in labels]
name_width = max(len(cn) for cn in target_names)
width = max(name_width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
head_fmt = u'{:>{width}s} ' + u' {:>9}' * len(headers)
report = head_fmt.format(u'', *headers, width=width)
report += u'\n\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight)
row_fmt = u'{:>{width}s} ' + u' {:>9.{digits}f}' * 3 + u' {:>9}\n'
rows = zip(target_names, p, r, f1, s)
for row in rows:
report += row_fmt.format(*row, width=width, digits=digits)
report += u'\n'
# compute averages
report += row_fmt.format(last_line_heading,
np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s),
np.sum(s),
width=width, digits=digits)
return report
def hamming_loss(y_true, y_pred, labels=None, sample_weight=None,
classes=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
labels : array, shape = [n_labels], optional (default=None)
Integer array of labels. If not provided, labels will be inferred
from y_true and y_pred.
.. versionadded:: 0.18
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
.. versionadded:: 0.18
classes : array, shape = [n_labels], optional
Integer array of labels.
.. deprecated:: 0.18
This parameter has been deprecated in favor of ``labels`` in
version 0.18 and will be removed in 0.20. Use ``labels`` instead.
Returns
-------
loss : float or int,
Return the average Hamming loss between element of ``y_true`` and
``y_pred``.
See Also
--------
accuracy_score, jaccard_similarity_score, zero_one_loss
Notes
-----
In multiclass classification, the Hamming loss corresponds to the Hamming
distance between ``y_true`` and ``y_pred``, which is equivalent to the
subset ``zero_one_loss`` function.
In multilabel classification, the Hamming loss is different from the
subset zero-one loss. The zero-one loss considers the entire set of labels
for a given sample incorrect if it does not entirely match the true set of
labels. Hamming loss is more forgiving in that it penalizes only the
individual labels.
The Hamming loss is upper-bounded by the subset zero-one loss. When
normalized over samples, the Hamming loss is always between 0 and 1.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<https://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
if classes is not None:
warnings.warn("'classes' was renamed to 'labels' in version 0.18 and "
"will be removed in 0.20.", DeprecationWarning)
labels = classes
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
if sample_weight is None:
weight_average = 1.
else:
weight_average = np.mean(sample_weight)
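    # weight_average rescales the multilabel denominator below so that the
    # weighted count of differing labels still normalizes to [0, 1].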
if y_type.startswith('multilabel'):
n_differences = count_nonzero(y_true - y_pred,
sample_weight=sample_weight)
return (n_differences /
(y_true.shape[0] * len(labels) * weight_average))
elif y_type in ["binary", "multiclass"]:
return _weighted_sum(y_true != y_pred, sample_weight, normalize=True)
else:
raise ValueError("{0} is not supported".format(y_type))
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None,
labels=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. The log loss is only defined for two or more labels.
For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes) or (n_samples,)
Predicted probabilities, as returned by a classifier's
predict_proba method. If ``y_pred.shape = (n_samples,)``
the probabilities provided are assumed to be that of the
positive class. The labels in ``y_pred`` are assumed to be
ordered alphabetically, as done by
:class:`preprocessing.LabelBinarizer`.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
labels : array-like, optional (default=None)
If not provided, labels will be inferred from y_true. If ``labels``
is ``None`` and ``y_pred`` has shape (n_samples,) the labels are
assumed to be binary and are inferred from ``y_true``.
.. versionadded:: 0.18
Returns
-------
loss : float
Examples
--------
>>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
y_pred = check_array(y_pred, ensure_2d=False)
check_consistent_length(y_pred, y_true)
lb = LabelBinarizer()
if labels is not None:
lb.fit(labels)
else:
lb.fit(y_true)
if len(lb.classes_) == 1:
if labels is None:
raise ValueError('y_true contains only one label ({0}). Please '
'provide the true labels explicitly through the '
'labels argument.'.format(lb.classes_[0]))
else:
raise ValueError('The labels array needs to contain at least two '
'labels for log_loss, '
'got {0}.'.format(lb.classes_))
transformed_labels = lb.transform(y_true)
if transformed_labels.shape[1] == 1:
transformed_labels = np.append(1 - transformed_labels,
transformed_labels, axis=1)
# Clipping
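    # Clip probabilities to [eps, 1 - eps] so that the log() below never
    # receives exactly 0 or 1.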
y_pred = np.clip(y_pred, eps, 1 - eps)
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if y_pred.ndim == 1:
y_pred = y_pred[:, np.newaxis]
if y_pred.shape[1] == 1:
y_pred = np.append(1 - y_pred, y_pred, axis=1)
# Check if dimensions are consistent.
transformed_labels = check_array(transformed_labels)
if len(lb.classes_) != y_pred.shape[1]:
if labels is None:
raise ValueError("y_true and y_pred contain different number of "
"classes {0}, {1}. Please provide the true "
"labels explicitly through the labels argument. "
"Classes found in "
"y_true: {2}".format(transformed_labels.shape[1],
y_pred.shape[1],
lb.classes_))
else:
raise ValueError('The number of classes in labels is different '
'from that in y_pred. Classes found in '
'labels: {0}'.format(lb.classes_))
# Renormalize
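    # Divide each row by its sum so that every row of y_pred is a proper
    # probability distribution before taking the logarithm.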
y_pred /= y_pred.sum(axis=1)[:, np.newaxis]
loss = -(transformed_labels * np.log(y_pred)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized)
In binary class case, assuming labels in y_true are encoded with +1 and -1,
when a prediction mistake is made, ``margin = y_true * pred_decision`` is
always negative (since the signs disagree), implying ``1 - margin`` is
always greater than 1. The cumulated hinge loss is therefore an upper
bound of the number of mistakes made by the classifier.
In multiclass case, the function expects that either all the labels are
included in y_true or an optional labels argument is provided which
contains all the labels. The multiclass margin is calculated according
to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
is an upper bound of the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array, shape = [n_samples]
True target, consisting of integers of two values. The positive label
must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
labels : array, optional, default None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<https://en.wikipedia.org/wiki/Hinge_loss>`_
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero.
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision # doctest: +ELLIPSIS
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS
0.30...
In the multiclass case:
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
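        # Crammer-Singer margin: decision value of the true class minus the
        # largest decision value among the remaining classes.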
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
# The hinge_loss doesn't penalize good enough predictions.
losses[losses <= 0] = 0
return np.average(losses, weights=sample_weight)
def _check_binary_probabilistic_predictions(y_true, y_prob):
"""Check that y_true is binary and y_prob contains valid probabilities"""
check_consistent_length(y_true, y_prob)
labels = np.unique(y_true)
if len(labels) > 2:
raise ValueError("Only binary classification is supported. "
"Provided labels %s." % labels)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
return label_binarize(y_true, labels)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
"""Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
Across all items in a set of N predictions, the Brier score measures the
mean squared difference between (1) the predicted probability assigned
to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1).
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
label is controlled via the parameter pos_label, which defaults to 1.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
pos_label : int or str, default=None
Label of the positive class. If None, the maximum label is used as
positive class
Returns
-------
score : float
Brier score
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, \
pos_label="ham") # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
.. [1] `Wikipedia entry for the Brier score.
<https://en.wikipedia.org/wiki/Brier_score>`_
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
assert_all_finite(y_true)
assert_all_finite(y_prob)
if pos_label is None:
pos_label = y_true.max()
y_true = np.array(y_true == pos_label, int)
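    # y_true is now a 0/1 indicator of the positive class, so the Brier score
    # reduces to the (weighted) mean squared difference from y_prob.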
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
| bsd-3-clause |
shyamalschandra/scikit-learn | sklearn/metrics/tests/test_regression.py | 272 | 6066 | from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = r2_score(y_true, y_pred, multioutput='variance_weighted')
assert_almost_equal(error, 1. - 5. / 2)
error = r2_score(y_true, y_pred, multioutput='uniform_average')
assert_almost_equal(error, -.875)
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2, multioutput = _check_reg_targets(
y1, y2, None)
assert_equal(type1, y_type)
if type1 == 'continuous':
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test_regression_multioutput_array():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
y_true = [[0, 0]]*4
y_pred = [[1, 1]]*4
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [1., 1.], decimal=2)
assert_array_almost_equal(mae, [1., 1.], decimal=2)
assert_array_almost_equal(r, [0., 0.], decimal=2)
r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
assert_array_almost_equal(r, [0, -3.5], decimal=2)
assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='uniform_average'))
evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='raw_values')
assert_array_almost_equal(evs, [0, -1.25], decimal=2)
# Checking for the condition in which both numerator and denominator is
# zero.
y_true = [[1, 3], [-1, 2]]
y_pred = [[1, 4], [-1, 1]]
r2 = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(r2, [1., -3.], decimal=2)
assert_equal(np.mean(r2), r2_score(y_true, y_pred,
multioutput='uniform_average'))
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(evs, [1., -3.], decimal=2)
assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
def test_regression_custom_weights():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(maew, 0.475, decimal=3)
assert_almost_equal(rw, 0.94, decimal=2)
assert_almost_equal(evsw, 0.94, decimal=2)
| bsd-3-clause |
mvictor212/hmmlearn | examples/plot_hmm_stock_analysis.py | 4 | 2785 | """
==========================
Gaussian HMM of stock data
==========================
This script shows how to use Gaussian HMM.
It uses stock price data, which can be obtained from yahoo finance.
For more information on how to get stock prices with matplotlib, please refer
to date_demo1.py of matplotlib.
"""
from __future__ import print_function
import datetime
import numpy as np
import pylab as pl
from matplotlib.finance import quotes_historical_yahoo
from matplotlib.dates import YearLocator, MonthLocator, DateFormatter
from hmmlearn.hmm import GaussianHMM
print(__doc__)
###############################################################################
# Downloading the data
date1 = datetime.date(1995, 1, 1) # start date
date2 = datetime.date(2012, 1, 6) # end date
# get quotes from yahoo finance
quotes = quotes_historical_yahoo("INTC", date1, date2)
if len(quotes) == 0:
raise SystemExit
# unpack quotes
dates = np.array([q[0] for q in quotes], dtype=int)
close_v = np.array([q[2] for q in quotes])
volume = np.array([q[5] for q in quotes])[1:]
# take diff of close value
# this makes len(diff) = len(close_v) - 1
# therefore, other quantities also need to be shifted
diff = close_v[1:] - close_v[:-1]
dates = dates[1:]
close_v = close_v[1:]
# pack diff and volume for training
X = np.column_stack([diff, volume])
###############################################################################
# Run Gaussian HMM
print("fitting to HMM and decoding ...", end='')
n_components = 5
# make an HMM instance and execute fit
model = GaussianHMM(n_components, covariance_type="diag", n_iter=1000)
model.fit([X])
# predict the optimal sequence of internal hidden state
hidden_states = model.predict(X)
print("done\n")
###############################################################################
# print trained parameters and plot
print("Transition matrix")
print(model.transmat_)
print()
print("means and vars of each hidden state")
for i in range(n_components):
print("%dth hidden state" % i)
print("mean = ", model.means_[i])
print("var = ", np.diag(model.covars_[i]))
print()
years = YearLocator() # every year
months = MonthLocator() # every month
yearsFmt = DateFormatter('%Y')
fig = pl.figure()
ax = fig.add_subplot(111)
for i in range(n_components):
# use fancy indexing to plot data in each state
idx = (hidden_states == i)
ax.plot_date(dates[idx], close_v[idx], 'o', label="%dth hidden state" % i)
ax.legend()
# format the ticks
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(yearsFmt)
ax.xaxis.set_minor_locator(months)
ax.autoscale_view()
# format the coords message box
ax.fmt_xdata = DateFormatter('%Y-%m-%d')
ax.fmt_ydata = lambda x: '$%1.2f' % x
ax.grid(True)
fig.autofmt_xdate()
pl.show()
| bsd-3-clause |
kcyu2014/ml_project2 | project2/utils/images2patches.py | 1 | 5530 | """ This module processes the Massachusetts dataset obtained from https://www.cs.toronto.edu/~vmnih/data/.
The original dataset contains satellite images of Massachusetts and its surroundings. The images are
of size 1500x1500, 3 channels, TIFF format. The dataset also contains separate labels for roads and
buildings. This module only processes the road dataset.
Processing: Adjusted for the needs of Project 2, Machine Learning course, EPFL (fall 2016). Since
the original satellite images are of a different zoom level and size than the dataset provided for the project,
they need to be rescaled and cropped (both the satellite image and its corresponding mask). From each original
image non-overlapping patches are taken and only those that contain at least `maskWhitePxRatioTh` * 100
percent of roads are kept. The resulting patches are stored in the `outputPath` directory.
"""
from PIL import Image
from matplotlib import pyplot as plt
import os
import numpy as np
########################################################################################################################
# INPUT PARAMETERS
########################################################################################################################
# Input dataset path.
inputPath = '../../../ext_data/massachusetts/original/'
outputPath = '../../../ext_data/massachusetts/patches/'
mapDir = 'map'
satDir = 'sat'
# Threshold for all-white parts of the satellite images - ratio of white pixels (intensity == 255). If the white/other
# ratio is higher than this threshold, the image is dropped.
whitePxRatioTh = 0.001
# Threshold of roads vs. background within mask patch - if the roads/background ratio is lower than this threshold,
# the patch is dropped.
maskWhitePxRatioTh = 0.005
# Upscale image and mask ratio.
upscale = (2.0, 2.0)
patchSize = (400, 400)
########################################################################################################################
# MAIN SCRIPT
########################################################################################################################
imagesFiles = [im for im in os.listdir(inputPath + satDir) if im.endswith('.tiff')]
numFiles = len(imagesFiles)
for idx, imgFile in enumerate(imagesFiles):
print('Processing image {im} / {tot}'.format(im=idx + 1, tot=numFiles))
# Load satellite image.
img = Image.open(inputPath + satDir + '/' + imgFile)
assert(img.mode == 'RGB')
# Get image size.
imgSize = img.size
# Convert image to grayscale.
gsImg = img.convert(mode='L')
hist = gsImg.histogram()
whitePxRatio = float(hist[255]) / (imgSize[0] * imgSize[1])
# If the image contains no or insignificant white parts, process it further.
if whitePxRatio < whitePxRatioTh:
# Load ground truth road binary mask
try:
gtMask = Image.open(inputPath + mapDir + '/' + imgFile)
except:
print('Error: cannot open ground truth binary mask file {f}'.format(f=inputPath + mapDir + '/' + imgFile))
continue
# Check that mask's size matches the corresponding image.
assert(gtMask.size == imgSize)
# Upscale the image and the mask. For upsampling, nearest neighbour (NEAREST) is used.
# Another possible option is BICUBIC (only for satellite img), which, however, blurs the image. We need to experiment
# to find out which one is better.
newSize = (int(imgSize[0] * upscale[0]), int(imgSize[1] * upscale[1]))
imgSize = newSize
# Check that at least one patch can fit in the original image.
assert(newSize[0] // patchSize[0] > 0)
assert(newSize[1] // patchSize[1] > 0)
img = img.resize(newSize, resample=Image.NEAREST)
gtMask = gtMask.resize(newSize, resample=Image.NEAREST)
# Generate x,y coordinates of centers of patches.
left = 0
right = imgSize[0] - patchSize[0]
top = 0
bottom = imgSize[1] - patchSize[1]
numPatchesInRow = imgSize[0] // patchSize[0]
numPatchesInCol = imgSize[1] // patchSize[1]
centersInRow = np.linspace(left, right, numPatchesInRow, dtype=np.int32)
centersInCol = np.linspace(top, bottom, numPatchesInCol, dtype=np.int32)
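        # The patch origins are spread evenly between the image edges, so the
        # patches never overlap (small gaps may remain when the upscaled image
        # size is not an exact multiple of the patch size).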
# Coordinates of patches (left, top, right, bottom)
patchesCoords = [(l, t, l + patchSize[0], t + patchSize[1]) for t in centersInCol for l in centersInRow]
# Process each patch
for pc in patchesCoords:
# Get a patch of img and mask.
patchMask = gtMask.crop(pc)
patchImg = img.crop(pc)
# Check correct size of a patch.
assert(patchMask.size == patchSize)
# Find the ratio of white pixels (roads) to black pixels (background).
patchMaskHist = patchMask.histogram()
maskWhitePxRatio = float(patchMaskHist[255]) / (patchSize[0] * patchSize[1])
# Check whether there is sufficient amount of roads in this patch and if so, save the patch (img and mask).
if maskWhitePxRatio > maskWhitePxRatioTh:
nameSuffix = '_(' + str(pc[1] + patchSize[1] // 2) + ', ' + str(pc[0] + patchSize[0] // 2) + ')'
name = imgFile[:-5] + nameSuffix + '.tiff'
patchImg.save(outputPath + satDir + '/' + name)
patchMask.save(outputPath + mapDir + '/' + name)
| mit |
stggh/PyAbel | doc/tools/smoothstep.py | 3 | 1356 | from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
n = 12
rmin = 2.5
rmax = 9.5
w = 1
r = np.linspace(0, n, n * 20)
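# Piecewise window profile: rises from 0 to 1 over [rmin - w, rmin + w] via the
# cubic smoothstep t**2 * (3 - 2*t), stays at 1 between the edges, and falls
# back to 0 symmetrically over [rmax - w, rmax + w].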
def smoothstep(r):
if r < rmin - w or r > rmax + w:
return 0
elif r < rmin + w:
t = (r - (rmin - w)) / (2 * w)
return t**2 * (3 - 2 * t)
elif r > rmax - w:
t = ((rmax + w) - r) / (2 * w)
return t**2 * (3 - 2 * t)
else: # rmin + w < r < rmax + w
return 1
fig = plt.figure(figsize=(6, 2), frameon=False)
ax = plt.gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.xlim((0, n))
plt.xticks([0, rmin, rmax], ['$0$', r'$r_{\rm min}$', r'$r_{\rm max}$'])
plt.ylim((0, 1.1))
plt.ylim(bottom=0)
plt.yticks([0, 0.5, 1], ['$0$', '$A/2$', '$A$'])
plt.vlines([rmin - w, rmin, rmin + w], 0, 1, color='lightgray')
plt.vlines([rmax - w, rmax, rmax + w], 0, 1, color='lightgray')
plt.hlines([0.5, 1], 0, rmax + w, color='lightgray')
textprm = {'horizontalalignment': 'center',
'verticalalignment': 'bottom'}
plt.text(rmin - w/2, 1, '$w$', textprm)
plt.text(rmin + w/2, 1, '$w$', textprm)
plt.text(rmax - w/2, 1, '$w$', textprm)
plt.text(rmax + w/2, 1, '$w$', textprm)
plt.plot(r, [smoothstep(ri) for ri in r], color='red')
plt.tight_layout()
#plt.show()
#plt.savefig('smoothstep.svg')
| mit |
ZENGXH/scikit-learn | sklearn/manifold/tests/test_locally_linear.py | 232 | 4761 | from itertools import product
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.toarray(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
A = barycenter_kneighbors_graph(X, 2)
# check that columns sum to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
pred = np.dot(A.toarray(), X)
assert_less(linalg.norm(pred - X) / X.shape[0], 1)
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
tol = 0.1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
from nose.tools import assert_raises
M = np.ones((10, 3))
f = ignore_warnings
assert_raises(ValueError, f(manifold.locally_linear_embedding),
M, 2, 1, method='standard', eigen_solver='arpack')
| bsd-3-clause |
vivekmishra1991/scikit-learn | examples/cluster/plot_lena_segmentation.py | 271 | 2444 | """
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discrete' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <[email protected]>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11
###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels,
random_state=1)
t1 = time.time()
labels = labels.reshape(lena.shape)
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l, contours=1,
colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
plt.xticks(())
plt.yticks(())
plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
plt.show()
| bsd-3-clause |
botswana-harvard/bcpp-export | bcpp_export/old_export/working/20161012.py | 1 | 6412 | import pandas as pd
import numpy as np
from bcpp_export.constants import hiv_options
from bcpp_export.dataframes.longitudinal_subjects import LongitudinalSubjects
pd.set_option('display.width', None)
# load saved files (generated using bcpp_export)
df_s1 = pd.read_csv('/Users/erikvw/Documents/bcpp/cdc/20161007/bcpp_export_year1_20161006_results.csv', low_memory=False)
df_s1['appointment__visit_definition__code'] = 'T0'
df_s1 = df_s1.query('pair >= 1 and pair <= 12 and intervention == True').drop_duplicates('subject_identifier')
df_s2 = pd.read_csv('/Users/erikvw/Documents/bcpp/cdc/20161007/bcpp_export_year2_20161006_results.csv', low_memory=False)
# df_s2 = df_s2.rename(columns={'appointment__visit_definition__code': 'timepoint'})
df_s2 = df_s2.query('pair >= 1 and pair <= 12 and intervention == True').drop_duplicates('subject_identifier')
# merge to create Y2 dataframe and run through class to calculate derived vars
df = pd.merge(df_s2, df_s1, how='left', on='subject_identifier', suffixes=['', '_y1'])
subjects = LongitudinalSubjects(df)
df = subjects.df
# override two values for final_arv_status (agreed w/ Kara and Kathleen)
df.loc[df['subject_identifier'] == '066-19260006-2', 'final_arv_status'] = 2.0
df.loc[df['subject_identifier'] == '066-31310013-6', 'final_arv_status'] = 2.0
# use same export columns as before
subjects_columns = [
'age_in_years', 'arv_clinic', 'cd4_date', 'cd4_tested', 'cd4_value',
'circumcised', 'community', 'consent_date', 'final_arv_status', 'final_hiv_status',
'final_hiv_status_date', 'gender', 'identity', 'identity256', 'pregnant',
'prev_result_known', 'prev_result', 'prev_result_date', 'referred', 'self_reported_result',
'subject_identifier', 'timepoint', 'survey', 'pair', 'timestamp']
# write final file
path_or_buf_y2 = '/Users/erikvw/Documents/bcpp/cdc/20161007/df_tmp20161010F_y2.csv'
df.to_csv(columns=subjects_columns, path_or_buf=path_or_buf_y2, index=False)
df_s1 = pd.read_csv('/Users/erikvw/Documents/bcpp/cdc/20161007/bcpp_export_year1_20161006_results.csv', low_memory=False)
df_s1 = df_s1[df_s1['intervention'] == True]
path_or_buf_y1 = '/Users/erikvw/Documents/bcpp/cdc/20161007/df_tmp20161010E_y1.csv'
df_s1.to_csv(columns=subjects_columns, path_or_buf=path_or_buf_y1, index=False)
df_y1 = pd.read_csv(path_or_buf_y1, low_memory=False)
df_y1['timepoint'] = 'T0'
df_y2 = pd.read_csv(path_or_buf_y2, low_memory=False)
df_final = pd.concat([df_y1, df_y2])
path_or_buf_final = '/Users/erikvw/Documents/bcpp/cdc/20161007/bcpp_subjects_20161012_pairs1-12.csv'
df_final.to_csv(path_or_buf=path_or_buf_final, index=False)
# jean
df_jl = pd.read_csv('/Users/erikvw/Documents/bcpp/cdc/20161007/bcpp_subject_jean_y2_t1.csv', low_memory=True)
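# the date columns arrive as ddMonyyyy strings (e.g. '01Jan2016' -- illustrative value);
# hyphens are inserted below so pandas can parse them as dd-Mon-yyyy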
df_jl['hiv_result_datetime'] = df_jl['hiv_result_datetime'].str[0:2] + '-' + df_jl['hiv_result_datetime'].str[2:5] + '-' + df_jl['hiv_result_datetime'].str[5:9]
df_jl['hiv_result_datetime'] = pd.to_datetime(df_jl['hiv_result_datetime'])
df_jl['prev_result_date'] = df_jl['prev_result_date'].str[0:2] + '-' + df_jl['prev_result_date'].str[2:5] + '-' + df_jl['prev_result_date'].str[5:9]
df_jl['prev_result_date'] = pd.to_datetime(df_jl['prev_result_date'])
df_jl = df_jl.rename(columns={
'hiv_result_datetime': 'final_hiv_status_date',
'Pair': 'pair',
'Intervention': 'intervention',
'survey_slug': 'survey'})
arv_options_jl = {
'ARV naive': 1.0,
'ARV defaulter': 2.0,
'On ARVs': 3.0}
intervention_options_jl = {
'CPC': 1.0,
'ECC': 0.0}
df_jl['prev_result'] = df_jl['prev_result'].map(hiv_options.get)
df_jl['final_arv_status'] = df_jl['final_arv_status'].map(arv_options_jl.get)
df_jl['intervention'] = df_jl['intervention'].map(intervention_options_jl.get)
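# The two bare list literals below appear to be column references left over from
# interactive work; they are plain expressions and have no effect when the script runs.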
['age_in_years',
'arv_clinic',
'cd4_date',
'cd4_tested',
'cd4_value',
'circumcised',
'community',
'consent_date',
'final_arv_status',
'final_hiv_status',
'final_hiv_status_date',
'gender',
'identity',
'identity256',
'pair',
'pregnant',
'prev_result',
'prev_result_date',
'prev_result_known',
'referred',
'self_reported_result',
'subject_identifier',
'survey',
'timepoint',
'timestamp']
['final_arv_status',
'final_hiv_status',
'final_hiv_status_date',
'pair',
'prev_result',
'prev_result_date',
'prev_result_known',
'subject_identifier',
'survey',
'timepoint']
# viral load
# file from Moyo, 2016-10-12
df_vl = pd.read_csv('/Users/erikvw/Documents/bcpp/cdc/20161007/bhp066_auvl_all.csv', low_memory=True)
# fix datetimes
df_vl['sample_date_drawn'] = pd.to_datetime(df_vl['sample_date_drawn'])
df_vl['date_received'] = pd.to_datetime(df_vl['date_received'])
df_vl['sample_assay_date'] = pd.to_datetime(df_vl['sample_assay_date'])
df_vl['report_date'] = pd.to_datetime(df_vl['reportDate'])
df_vl = df_vl.drop('reportDate', axis=1)
df_vl['specimen_identifier'] = df_vl['edc_specimen_identifier'].str.slice(0, 12)
df_vl = df_vl.rename(columns={'edc_specimen_identifier': 'aliquot_identifier', 'RESULT': 'result'})
df_vl = df_vl[pd.notnull(df_vl['specimen_identifier'])]
# file from bcpp-rdb (through runserver)
df_req = pd.read_csv('/Users/erikvw/Documents/bcpp/cdc/20161007/bcpp_requisitions_2016-10-11.csv', low_memory=True)
df_req = df_req[pd.notnull(df_req['specimen_identifier'])]
df_vl2 = pd.merge(df_vl, df_req, on=['specimen_identifier'], how='left', suffixes=['', '_vl'])
df_vl2 = df_vl2[pd.notnull(df_vl2['result'])]
df_vl2 = df_vl2[df_vl2['result'] != '*']
# df_vl2['dupl'] = df_vl2.duplicated(['subject_identifier', 'specimen_identifier']) & (df_vl2['code'] == 'T1')
# df_vl2['dupl'] = df_vl2.duplicated(['subject_identifier', 'specimen_identifier']) & (df_vl2['code'] == 'T0') & (df_vl2['dupl'] == False)
# df_vl2 = df_vl2[df_vl2['dupl']==False]
# df_vl2 = df_vl2.drop('dupl', axis=1)
df_vl2 = df_vl2.rename(columns={'code': 'timepoint'})
df_vl2 = df_vl2.sort_values(['subject_identifier', 'timepoint', 'report_date'])
df_vl2 = df_vl2.drop_duplicates(['subject_identifier', 'timepoint', 'report_date'], keep='last')
df_vl2 = df_vl2.drop_duplicates(['subject_identifier', 'timepoint'], keep='last') # 4308 rows x 20 columns
# returns an empty series
# df_vl2[df_vl2.duplicated(['subject_identifier', 'code'])]
df = pd.merge(df, df_vl2[['subject_identifier', 'timepoint', 'result', 'result_quantifier', 'sample_assay_date', 'is_drawn']],
how='left', on=['subject_identifier', 'timepoint'], suffixes=['', '_vl'])
| gpl-2.0 |
MJuddBooth/pandas | pandas/tests/frame/test_apply.py | 1 | 45389 | # -*- coding: utf-8 -*-
from __future__ import print_function
from collections import OrderedDict
from datetime import datetime
from itertools import chain
import operator
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
DataFrame, MultiIndex, Series, Timestamp, compat, date_range, notna)
from pandas.conftest import _get_cython_table_params
from pandas.core.apply import frame_apply
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
@pytest.fixture
def int_frame_const_col():
"""
Fixture for DataFrame of ints which are constant per column
Columns are ['A', 'B', 'C'], with values (per column): [1, 2, 3]
"""
df = DataFrame(np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
return df
class TestDataFrameApply():
def test_apply(self, float_frame):
with np.errstate(all='ignore'):
# ufunc
applied = float_frame.apply(np.sqrt)
tm.assert_series_equal(np.sqrt(float_frame['A']), applied['A'])
# aggregator
applied = float_frame.apply(np.mean)
assert applied['A'] == np.mean(float_frame['A'])
d = float_frame.index[0]
applied = float_frame.apply(np.mean, axis=1)
assert applied[d] == np.mean(float_frame.xs(d))
assert applied.index is float_frame.index # want this
# invalid axis
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
with pytest.raises(ValueError):
df.apply(lambda x: x, 2)
# GH 9573
df = DataFrame({'c0': ['A', 'A', 'B', 'B'],
'c1': ['C', 'C', 'D', 'D']})
df = df.apply(lambda ts: ts.astype('category'))
assert df.shape == (4, 2)
assert isinstance(df['c0'].dtype, CategoricalDtype)
assert isinstance(df['c1'].dtype, CategoricalDtype)
def test_apply_mixed_datetimelike(self):
# mixed datetimelike
# GH 7778
df = DataFrame({'A': date_range('20130101', periods=3),
'B': pd.to_timedelta(np.arange(3), unit='s')})
result = df.apply(lambda x: x, axis=1)
assert_frame_equal(result, df)
def test_apply_empty(self, float_frame):
# empty
empty_frame = DataFrame()
applied = empty_frame.apply(np.sqrt)
assert applied.empty
applied = empty_frame.apply(np.mean)
assert applied.empty
no_rows = float_frame[:0]
result = no_rows.apply(lambda x: x.mean())
expected = Series(np.nan, index=float_frame.columns)
assert_series_equal(result, expected)
no_cols = float_frame.loc[:, []]
result = no_cols.apply(lambda x: x.mean(), axis=1)
expected = Series(np.nan, index=float_frame.index)
assert_series_equal(result, expected)
# GH 2476
expected = DataFrame(index=['a'])
result = expected.apply(lambda x: x['a'], axis=1)
assert_frame_equal(expected, result)
def test_apply_with_reduce_empty(self):
# reduce with an empty DataFrame
empty_frame = DataFrame()
x = []
result = empty_frame.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, empty_frame)
result = empty_frame.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
empty_with_cols = DataFrame(columns=['a', 'b', 'c'])
result = empty_with_cols.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, empty_with_cols)
result = empty_with_cols.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
# Ensure that x.append hasn't been called
assert x == []
def test_apply_deprecate_reduce(self):
empty_frame = DataFrame()
x = []
with tm.assert_produces_warning(FutureWarning):
empty_frame.apply(x.append, axis=1, reduce=True)
def test_apply_standard_nonunique(self):
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
result = df.apply(lambda s: s[0], axis=1)
expected = Series([1, 4, 7], ['a', 'a', 'c'])
assert_series_equal(result, expected)
result = df.T.apply(lambda s: s[0], axis=0)
assert_series_equal(result, expected)
@pytest.mark.parametrize('func', ['sum', 'mean', 'min', 'max', 'std'])
@pytest.mark.parametrize('args,kwds', [
pytest.param([], {}, id='no_args_or_kwds'),
pytest.param([1], {}, id='axis_from_args'),
pytest.param([], {'axis': 1}, id='axis_from_kwds'),
pytest.param([], {'numeric_only': True}, id='optional_kwds'),
pytest.param([1, None], {'numeric_only': True}, id='args_and_kwds')
])
def test_apply_with_string_funcs(self, float_frame, func, args, kwds):
result = float_frame.apply(func, *args, **kwds)
expected = getattr(float_frame, func)(*args, **kwds)
tm.assert_series_equal(result, expected)
def test_apply_broadcast_deprecated(self, float_frame):
with tm.assert_produces_warning(FutureWarning):
float_frame.apply(np.mean, broadcast=True)
def test_apply_broadcast(self, float_frame, int_frame_const_col):
# scalars
result = float_frame.apply(np.mean, result_type='broadcast')
expected = DataFrame([float_frame.mean()], index=float_frame.index)
tm.assert_frame_equal(result, expected)
result = float_frame.apply(np.mean, axis=1, result_type='broadcast')
m = float_frame.mean(axis=1)
expected = DataFrame({c: m for c in float_frame.columns})
tm.assert_frame_equal(result, expected)
# lists
result = float_frame.apply(
lambda x: list(range(len(float_frame.columns))),
axis=1,
result_type='broadcast')
m = list(range(len(float_frame.columns)))
expected = DataFrame([m] * len(float_frame.index),
dtype='float64',
index=float_frame.index,
columns=float_frame.columns)
tm.assert_frame_equal(result, expected)
result = float_frame.apply(lambda x:
list(range(len(float_frame.index))),
result_type='broadcast')
m = list(range(len(float_frame.index)))
expected = DataFrame({c: m for c in float_frame.columns},
dtype='float64',
index=float_frame.index)
tm.assert_frame_equal(result, expected)
# preserve columns
df = int_frame_const_col
result = df.apply(lambda x: [1, 2, 3], axis=1, result_type='broadcast')
tm.assert_frame_equal(result, df)
df = int_frame_const_col
result = df.apply(lambda x: Series([1, 2, 3], index=list('abc')),
axis=1, result_type='broadcast')
expected = df.copy()
tm.assert_frame_equal(result, expected)
def test_apply_broadcast_error(self, int_frame_const_col):
df = int_frame_const_col
# > 1 ndim
with pytest.raises(ValueError):
df.apply(lambda x: np.array([1, 2]).reshape(-1, 2),
axis=1, result_type='broadcast')
# cannot broadcast
with pytest.raises(ValueError):
df.apply(lambda x: [1, 2], axis=1, result_type='broadcast')
with pytest.raises(ValueError):
df.apply(lambda x: Series([1, 2]), axis=1, result_type='broadcast')
def test_apply_raw(self, float_frame):
result0 = float_frame.apply(np.mean, raw=True)
result1 = float_frame.apply(np.mean, axis=1, raw=True)
expected0 = float_frame.apply(lambda x: x.values.mean())
expected1 = float_frame.apply(lambda x: x.values.mean(), axis=1)
assert_series_equal(result0, expected0)
assert_series_equal(result1, expected1)
# no reduction
result = float_frame.apply(lambda x: x * 2, raw=True)
expected = float_frame * 2
assert_frame_equal(result, expected)
def test_apply_axis1(self, float_frame):
d = float_frame.index[0]
tapplied = float_frame.apply(np.mean, axis=1)
assert tapplied[d] == np.mean(float_frame.xs(d))
def test_apply_ignore_failures(self, float_string_frame):
result = frame_apply(float_string_frame, np.mean, 0,
ignore_failures=True).apply_standard()
expected = float_string_frame._get_numeric_data().apply(np.mean)
assert_series_equal(result, expected)
def test_apply_mixed_dtype_corner(self):
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df[:0].apply(np.mean, axis=1)
# the result here is actually kind of ambiguous, should it be a Series
# or a DataFrame?
expected = Series(np.nan, index=pd.Index([], dtype='int64'))
assert_series_equal(result, expected)
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df.apply(lambda x: x['A'], axis=1)
expected = Series(['foo'], index=[0])
assert_series_equal(result, expected)
result = df.apply(lambda x: x['B'], axis=1)
expected = Series([1.], index=[0])
assert_series_equal(result, expected)
def test_apply_empty_infer_type(self):
no_cols = DataFrame(index=['a', 'b', 'c'])
no_index = DataFrame(columns=['a', 'b', 'c'])
def _check(df, f):
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
test_res = f(np.array([], dtype='f8'))
is_reduction = not isinstance(test_res, np.ndarray)
def _checkit(axis=0, raw=False):
result = df.apply(f, axis=axis, raw=raw)
if is_reduction:
agg_axis = df._get_agg_axis(axis)
assert isinstance(result, Series)
assert result.index is agg_axis
else:
assert isinstance(result, DataFrame)
_checkit()
_checkit(axis=1)
_checkit(raw=True)
_checkit(axis=0, raw=True)
with np.errstate(all='ignore'):
_check(no_cols, lambda x: x)
_check(no_cols, lambda x: x.mean())
_check(no_index, lambda x: x)
_check(no_index, lambda x: x.mean())
result = no_cols.apply(lambda x: x.mean(), result_type='broadcast')
assert isinstance(result, DataFrame)
def test_apply_with_args_kwds(self, float_frame):
def add_some(x, howmuch=0):
return x + howmuch
def agg_and_add(x, howmuch=0):
return x.mean() + howmuch
def subtract_and_divide(x, sub, divide=1):
return (x - sub) / divide
result = float_frame.apply(add_some, howmuch=2)
expected = float_frame.apply(lambda x: x + 2)
assert_frame_equal(result, expected)
result = float_frame.apply(agg_and_add, howmuch=2)
expected = float_frame.apply(lambda x: x.mean() + 2)
assert_series_equal(result, expected)
result = float_frame.apply(subtract_and_divide, args=(2,), divide=2)
expected = float_frame.apply(lambda x: (x - 2.) / 2.)
assert_frame_equal(result, expected)
def test_apply_yield_list(self, float_frame):
result = float_frame.apply(list)
assert_frame_equal(result, float_frame)
def test_apply_reduce_Series(self, float_frame):
float_frame.loc[::2, 'A'] = np.nan
expected = float_frame.mean(1)
result = float_frame.apply(np.mean, axis=1)
assert_series_equal(result, expected)
def test_apply_reduce_rows_to_dict(self):
# GH 25196
data = pd.DataFrame([[1, 2], [3, 4]])
expected = pd.Series([{0: 1, 1: 3}, {0: 2, 1: 4}])
result = data.apply(dict)
assert_series_equal(result, expected)
def test_apply_differently_indexed(self):
df = DataFrame(np.random.randn(20, 10))
result0 = df.apply(Series.describe, axis=0)
expected0 = DataFrame({i: v.describe()
for i, v in compat.iteritems(df)},
columns=df.columns)
assert_frame_equal(result0, expected0)
result1 = df.apply(Series.describe, axis=1)
expected1 = DataFrame({i: v.describe()
for i, v in compat.iteritems(df.T)},
columns=df.index).T
assert_frame_equal(result1, expected1)
def test_apply_modify_traceback(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
data.loc[4, 'C'] = np.nan
def transform(row):
if row['C'].startswith('shin') and row['A'] == 'foo':
row['D'] = 7
return row
def transform2(row):
if (notna(row['C']) and row['C'].startswith('shin') and
row['A'] == 'foo'):
row['D'] = 7
return row
try:
data.apply(transform, axis=1)
except AttributeError as e:
assert len(e.args) == 2
assert e.args[1] == 'occurred at index 4'
assert e.args[0] == "'float' object has no attribute 'startswith'"
def test_apply_bug(self):
# GH 6125
positions = pd.DataFrame([[1, 'ABC0', 50], [1, 'YUM0', 20],
[1, 'DEF0', 20], [2, 'ABC1', 50],
[2, 'YUM1', 20], [2, 'DEF1', 20]],
columns=['a', 'market', 'position'])
def f(r):
return r['market']
expected = positions.apply(f, axis=1)
positions = DataFrame([[datetime(2013, 1, 1), 'ABC0', 50],
[datetime(2013, 1, 2), 'YUM0', 20],
[datetime(2013, 1, 3), 'DEF0', 20],
[datetime(2013, 1, 4), 'ABC1', 50],
[datetime(2013, 1, 5), 'YUM1', 20],
[datetime(2013, 1, 6), 'DEF1', 20]],
columns=['a', 'market', 'position'])
result = positions.apply(f, axis=1)
assert_series_equal(result, expected)
def test_apply_convert_objects(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
result = data.apply(lambda x: x, axis=1)
assert_frame_equal(result._convert(datetime=True), data)
def test_apply_attach_name(self, float_frame):
result = float_frame.apply(lambda x: x.name)
expected = Series(float_frame.columns, index=float_frame.columns)
assert_series_equal(result, expected)
result = float_frame.apply(lambda x: x.name, axis=1)
expected = Series(float_frame.index, index=float_frame.index)
assert_series_equal(result, expected)
# non-reductions
result = float_frame.apply(lambda x: np.repeat(x.name, len(x)))
expected = DataFrame(np.tile(float_frame.columns,
(len(float_frame.index), 1)),
index=float_frame.index,
columns=float_frame.columns)
assert_frame_equal(result, expected)
result = float_frame.apply(lambda x: np.repeat(x.name, len(x)),
axis=1)
expected = Series(np.repeat(t[0], len(float_frame.columns))
for t in float_frame.itertuples())
expected.index = float_frame.index
assert_series_equal(result, expected)
def test_apply_multi_index(self, float_frame):
index = MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'd']])
s = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['col1', 'col2'])
result = s.apply(
lambda x: Series({'min': min(x), 'max': max(x)}), 1)
expected = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['min', 'max'])
assert_frame_equal(result, expected, check_like=True)
def test_apply_dict(self):
# GH 8735
A = DataFrame([['foo', 'bar'], ['spam', 'eggs']])
A_dicts = Series([dict([(0, 'foo'), (1, 'spam')]),
dict([(0, 'bar'), (1, 'eggs')])])
B = DataFrame([[0, 1], [2, 3]])
B_dicts = Series([dict([(0, 0), (1, 2)]), dict([(0, 1), (1, 3)])])
fn = lambda x: x.to_dict()
for df, dicts in [(A, A_dicts), (B, B_dicts)]:
reduce_true = df.apply(fn, result_type='reduce')
reduce_false = df.apply(fn, result_type='expand')
reduce_none = df.apply(fn)
assert_series_equal(reduce_true, dicts)
assert_frame_equal(reduce_false, df)
assert_series_equal(reduce_none, dicts)
def test_applymap(self, float_frame):
applied = float_frame.applymap(lambda x: x * 2)
tm.assert_frame_equal(applied, float_frame * 2)
float_frame.applymap(type)
# GH 465: function returning tuples
result = float_frame.applymap(lambda x: (x, x))
assert isinstance(result['A'][0], tuple)
# GH 2909: object conversion to float in constructor?
df = DataFrame(data=[1, 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
df = DataFrame(data=[1., 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
# GH 2786
df = DataFrame(np.random.random((3, 4)))
df2 = df.copy()
cols = ['a', 'a', 'a', 'a']
df.columns = cols
expected = df2.applymap(str)
expected.columns = cols
result = df.applymap(str)
tm.assert_frame_equal(result, expected)
# datetime/timedelta
df['datetime'] = Timestamp('20130101')
df['timedelta'] = pd.Timedelta('1 min')
result = df.applymap(str)
for f in ['datetime', 'timedelta']:
assert result.loc[0, f] == str(df.loc[0, f])
# GH 8222
empty_frames = [pd.DataFrame(),
pd.DataFrame(columns=list('ABC')),
pd.DataFrame(index=list('ABC')),
pd.DataFrame({'A': [], 'B': [], 'C': []})]
for frame in empty_frames:
for func in [round, lambda x: x]:
result = frame.applymap(func)
tm.assert_frame_equal(result, frame)
def test_applymap_box_timestamps(self):
# GH 2689, GH 2627
ser = pd.Series(date_range('1/1/2000', periods=10))
def func(x):
return (x.hour, x.day, x.month)
# it works!
pd.DataFrame(ser).applymap(func)
def test_applymap_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
df = pd.DataFrame({'a': [pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02')],
'b': [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')],
'c': [pd.Timedelta('1 days'),
pd.Timedelta('2 days')],
'd': [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]})
result = df.applymap(lambda x: '{0}'.format(x.__class__.__name__))
expected = pd.DataFrame({'a': ['Timestamp', 'Timestamp'],
'b': ['Timestamp', 'Timestamp'],
'c': ['Timedelta', 'Timedelta'],
'd': ['Period', 'Period']})
tm.assert_frame_equal(result, expected)
def test_frame_apply_dont_convert_datetime64(self):
from pandas.tseries.offsets import BDay
df = DataFrame({'x1': [datetime(1996, 1, 1)]})
df = df.applymap(lambda x: x + BDay())
df = df.applymap(lambda x: x + BDay())
assert df.x1.dtype == 'M8[ns]'
def test_apply_non_numpy_dtype(self):
# GH 12244
df = DataFrame({'dt': pd.date_range(
"2015-01-01", periods=3, tz='Europe/Brussels')})
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
result = df.apply(lambda x: x + pd.Timedelta('1day'))
expected = DataFrame({'dt': pd.date_range(
"2015-01-02", periods=3, tz='Europe/Brussels')})
assert_frame_equal(result, expected)
df = DataFrame({'dt': ['a', 'b', 'c', 'a']}, dtype='category')
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
def test_apply_dup_names_multi_agg(self):
# GH 21063
df = pd.DataFrame([[0, 1], [2, 3]], columns=['a', 'a'])
expected = pd.DataFrame([[0, 1]], columns=['a', 'a'], index=['min'])
result = df.agg(['min'])
tm.assert_frame_equal(result, expected)
class TestInferOutputShape(object):
# the user has supplied an opaque UDF where
# they are transforming the input that requires
# us to infer the output
def test_infer_row_shape(self):
# GH 17437
# if row shape is changing, infer it
df = pd.DataFrame(np.random.rand(10, 2))
result = df.apply(np.fft.fft, axis=0)
assert result.shape == (10, 2)
result = df.apply(np.fft.rfft, axis=0)
assert result.shape == (6, 2)
def test_with_dictlike_columns(self):
# GH 17602
df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1)
expected = Series([{'s': 3} for t in df.itertuples()])
assert_series_equal(result, expected)
df['tm'] = [pd.Timestamp('2017-05-01 00:00:00'),
pd.Timestamp('2017-05-02 00:00:00')]
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1)
assert_series_equal(result, expected)
# compose a series
result = (df['a'] + df['b']).apply(lambda x: {'s': x})
expected = Series([{'s': 3}, {'s': 3}])
assert_series_equal(result, expected)
# GH 18775
df = DataFrame()
df["author"] = ["X", "Y", "Z"]
df["publisher"] = ["BBC", "NBC", "N24"]
df["date"] = pd.to_datetime(['17-10-2010 07:15:30',
'13-05-2011 08:20:35',
'15-01-2013 09:09:09'])
result = df.apply(lambda x: {}, axis=1)
expected = Series([{}, {}, {}])
assert_series_equal(result, expected)
def test_with_dictlike_columns_with_infer(self):
# GH 17602
df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1, result_type='expand')
expected = DataFrame({'s': [3, 3]})
assert_frame_equal(result, expected)
df['tm'] = [pd.Timestamp('2017-05-01 00:00:00'),
pd.Timestamp('2017-05-02 00:00:00')]
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1, result_type='expand')
assert_frame_equal(result, expected)
def test_with_listlike_columns(self):
# GH 17348
df = DataFrame({'a': Series(np.random.randn(4)),
'b': ['a', 'list', 'of', 'words'],
'ts': date_range('2016-10-01', periods=4, freq='H')})
result = df[['a', 'b']].apply(tuple, axis=1)
expected = Series([t[1:] for t in df[['a', 'b']].itertuples()])
assert_series_equal(result, expected)
result = df[['a', 'ts']].apply(tuple, axis=1)
expected = Series([t[1:] for t in df[['a', 'ts']].itertuples()])
assert_series_equal(result, expected)
# GH 18919
df = DataFrame({'x': Series([['a', 'b'], ['q']]),
'y': Series([['z'], ['q', 't']])})
df.index = MultiIndex.from_tuples([('i0', 'j0'), ('i1', 'j1')])
result = df.apply(
lambda row: [el for el in row['x'] if el in row['y']],
axis=1)
expected = Series([[], ['q']], index=df.index)
assert_series_equal(result, expected)
def test_infer_output_shape_columns(self):
# GH 18573
df = DataFrame({'number': [1., 2.],
'string': ['foo', 'bar'],
'datetime': [pd.Timestamp('2017-11-29 03:30:00'),
pd.Timestamp('2017-11-29 03:45:00')]})
result = df.apply(lambda row: (row.number, row.string), axis=1)
expected = Series([(t.number, t.string) for t in df.itertuples()])
assert_series_equal(result, expected)
def test_infer_output_shape_listlike_columns(self):
# GH 16353
df = DataFrame(np.random.randn(6, 3), columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1)
expected = Series([[1, 2, 3] for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1)
expected = Series([[1, 2] for t in df.itertuples()])
assert_series_equal(result, expected)
# GH 17970
df = DataFrame({"a": [1, 2, 3]}, index=list('abc'))
result = df.apply(lambda row: np.ones(1), axis=1)
expected = Series([np.ones(1) for t in df.itertuples()],
index=df.index)
assert_series_equal(result, expected)
result = df.apply(lambda row: np.ones(2), axis=1)
expected = Series([np.ones(2) for t in df.itertuples()],
index=df.index)
assert_series_equal(result, expected)
# GH 17892
df = pd.DataFrame({'a': [pd.Timestamp('2010-02-01'),
pd.Timestamp('2010-02-04'),
pd.Timestamp('2010-02-05'),
pd.Timestamp('2010-02-06')],
'b': [9, 5, 4, 3],
'c': [5, 3, 4, 2],
'd': [1, 2, 3, 4]})
def fun(x):
return (1, 2)
result = df.apply(fun, axis=1)
expected = Series([(1, 2) for t in df.itertuples()])
assert_series_equal(result, expected)
def test_consistent_coerce_for_shapes(self):
# we want column names to NOT be propagated
# just because the shape matches the input shape
df = DataFrame(np.random.randn(4, 3), columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1)
expected = Series([[1, 2, 3] for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1)
expected = Series([[1, 2] for t in df.itertuples()])
assert_series_equal(result, expected)
def test_consistent_names(self, int_frame_const_col):
# if a Series is returned, we should use the resulting index names
df = int_frame_const_col
result = df.apply(lambda x: Series([1, 2, 3],
index=['test', 'other', 'cols']),
axis=1)
expected = int_frame_const_col.rename(columns={'A': 'test',
'B': 'other',
'C': 'cols'})
assert_frame_equal(result, expected)
result = df.apply(lambda x: Series([1, 2], index=['test', 'other']),
axis=1)
expected = expected[['test', 'other']]
assert_frame_equal(result, expected)
def test_result_type(self, int_frame_const_col):
# result_type should be consistent no matter which
# path we take in the code
df = int_frame_const_col
result = df.apply(lambda x: [1, 2, 3], axis=1, result_type='expand')
expected = df.copy()
expected.columns = [0, 1, 2]
assert_frame_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1, result_type='expand')
expected = df[['A', 'B']].copy()
expected.columns = [0, 1]
assert_frame_equal(result, expected)
# broadcast result
result = df.apply(lambda x: [1, 2, 3], axis=1, result_type='broadcast')
expected = df.copy()
assert_frame_equal(result, expected)
columns = ['other', 'col', 'names']
result = df.apply(lambda x: Series([1, 2, 3], index=columns),
axis=1, result_type='broadcast')
expected = df.copy()
assert_frame_equal(result, expected)
# series result
result = df.apply(lambda x: Series([1, 2, 3], index=x.index), axis=1)
expected = df.copy()
assert_frame_equal(result, expected)
# series result with other index
columns = ['other', 'col', 'names']
result = df.apply(lambda x: Series([1, 2, 3], index=columns), axis=1)
expected = df.copy()
expected.columns = columns
assert_frame_equal(result, expected)
@pytest.mark.parametrize("result_type", ['foo', 1])
def test_result_type_error(self, result_type, int_frame_const_col):
# allowed result_type
df = int_frame_const_col
with pytest.raises(ValueError):
df.apply(lambda x: [1, 2, 3], axis=1, result_type=result_type)
@pytest.mark.parametrize(
"box",
[lambda x: list(x),
lambda x: tuple(x),
lambda x: np.array(x, dtype='int64')],
ids=['list', 'tuple', 'array'])
def test_consistency_for_boxed(self, box, int_frame_const_col):
# passing an array or list should not affect the output shape
df = int_frame_const_col
result = df.apply(lambda x: box([1, 2]), axis=1)
expected = Series([box([1, 2]) for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: box([1, 2]), axis=1, result_type='expand')
expected = int_frame_const_col[['A', 'B']].rename(columns={'A': 0,
'B': 1})
assert_frame_equal(result, expected)
def zip_frames(frames, axis=1):
"""
    take a list of frames and zip them together, under the
    assumption that they all share the first frame's index/columns.
Returns
-------
new_frame : DataFrame
"""
if axis == 1:
columns = frames[0].columns
zipped = [f.loc[:, c] for c in columns for f in frames]
return pd.concat(zipped, axis=1)
else:
index = frames[0].index
zipped = [f.loc[i, :] for i in index for f in frames]
return pd.DataFrame(zipped)
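# zip_frames([a, b]) interleaves the two frames column-by-column (a.c0, b.c0, a.c1, b.c1, ...)
# and is used in the aggregation tests below to build the expected output of
# apply/transform with a list of functions.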
class TestDataFrameAggregate():
def test_agg_transform(self, axis, float_frame):
other_axis = 1 if axis in {0, 'index'} else 0
with np.errstate(all='ignore'):
f_abs = np.abs(float_frame)
f_sqrt = np.sqrt(float_frame)
# ufunc
result = float_frame.transform(np.sqrt, axis=axis)
expected = f_sqrt.copy()
assert_frame_equal(result, expected)
result = float_frame.apply(np.sqrt, axis=axis)
assert_frame_equal(result, expected)
result = float_frame.transform(np.sqrt, axis=axis)
assert_frame_equal(result, expected)
# list-like
result = float_frame.apply([np.sqrt], axis=axis)
expected = f_sqrt.copy()
if axis in {0, 'index'}:
expected.columns = pd.MultiIndex.from_product(
[float_frame.columns, ['sqrt']])
else:
expected.index = pd.MultiIndex.from_product(
[float_frame.index, ['sqrt']])
assert_frame_equal(result, expected)
result = float_frame.transform([np.sqrt], axis=axis)
assert_frame_equal(result, expected)
# multiple items in list
# these are in the order as if we are applying both
# functions per series and then concatting
result = float_frame.apply([np.abs, np.sqrt], axis=axis)
expected = zip_frames([f_abs, f_sqrt], axis=other_axis)
if axis in {0, 'index'}:
expected.columns = pd.MultiIndex.from_product(
[float_frame.columns, ['absolute', 'sqrt']])
else:
expected.index = pd.MultiIndex.from_product(
[float_frame.index, ['absolute', 'sqrt']])
assert_frame_equal(result, expected)
result = float_frame.transform([np.abs, 'sqrt'], axis=axis)
assert_frame_equal(result, expected)
def test_transform_and_agg_err(self, axis, float_frame):
# cannot both transform and agg
with pytest.raises(ValueError):
float_frame.transform(['max', 'min'], axis=axis)
with pytest.raises(ValueError):
with np.errstate(all='ignore'):
float_frame.agg(['max', 'sqrt'], axis=axis)
with pytest.raises(ValueError):
with np.errstate(all='ignore'):
float_frame.transform(['max', 'sqrt'], axis=axis)
df = pd.DataFrame({'A': range(5), 'B': 5})
def f():
with np.errstate(all='ignore'):
df.agg({'A': ['abs', 'sum'], 'B': ['mean', 'max']}, axis=axis)
@pytest.mark.parametrize('method', [
'abs', 'shift', 'pct_change', 'cumsum', 'rank',
])
def test_transform_method_name(self, method):
# GH 19760
df = pd.DataFrame({"A": [-1, 2]})
result = df.transform(method)
expected = operator.methodcaller(method)(df)
tm.assert_frame_equal(result, expected)
def test_demo(self):
# demonstration tests
df = pd.DataFrame({'A': range(5), 'B': 5})
result = df.agg(['min', 'max'])
expected = DataFrame({'A': [0, 4], 'B': [5, 5]},
columns=['A', 'B'],
index=['min', 'max'])
tm.assert_frame_equal(result, expected)
result = df.agg({'A': ['min', 'max'], 'B': ['sum', 'max']})
expected = DataFrame({'A': [4.0, 0.0, np.nan],
'B': [5.0, np.nan, 25.0]},
columns=['A', 'B'],
index=['max', 'min', 'sum'])
tm.assert_frame_equal(result.reindex_like(expected), expected)
def test_agg_multiple_mixed_no_warning(self):
# GH 20909
mdf = pd.DataFrame({'A': [1, 2, 3],
'B': [1., 2., 3.],
'C': ['foo', 'bar', 'baz'],
'D': pd.date_range('20130101', periods=3)})
expected = pd.DataFrame({"A": [1, 6], 'B': [1.0, 6.0],
"C": ['bar', 'foobarbaz'],
"D": [pd.Timestamp('2013-01-01'), pd.NaT]},
index=['min', 'sum'])
# sorted index
with tm.assert_produces_warning(None):
result = mdf.agg(['min', 'sum'])
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(None):
result = mdf[['D', 'C', 'B', 'A']].agg(['sum', 'min'])
# For backwards compatibility, the result's index is
# still sorted by function name, so it's ['min', 'sum']
# not ['sum', 'min'].
expected = expected[['D', 'C', 'B', 'A']]
tm.assert_frame_equal(result, expected)
def test_agg_dict_nested_renaming_depr(self):
df = pd.DataFrame({'A': range(5), 'B': 5})
# nested renaming
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
df.agg({'A': {'foo': 'min'},
'B': {'bar': 'max'}})
def test_agg_reduce(self, axis, float_frame):
other_axis = 1 if axis in {0, 'index'} else 0
name1, name2 = float_frame.axes[other_axis].unique()[:2].sort_values()
# all reducers
expected = pd.concat([float_frame.mean(axis=axis),
float_frame.max(axis=axis),
float_frame.sum(axis=axis),
], axis=1)
expected.columns = ['mean', 'max', 'sum']
expected = expected.T if axis in {0, 'index'} else expected
result = float_frame.agg(['mean', 'max', 'sum'], axis=axis)
assert_frame_equal(result, expected)
# dict input with scalars
func = OrderedDict([(name1, 'mean'), (name2, 'sum')])
result = float_frame.agg(func, axis=axis)
expected = Series([float_frame.loc(other_axis)[name1].mean(),
float_frame.loc(other_axis)[name2].sum()],
index=[name1, name2])
assert_series_equal(result, expected)
# dict input with lists
func = OrderedDict([(name1, ['mean']), (name2, ['sum'])])
result = float_frame.agg(func, axis=axis)
expected = DataFrame({
name1: Series([float_frame.loc(other_axis)[name1].mean()],
index=['mean']),
name2: Series([float_frame.loc(other_axis)[name2].sum()],
index=['sum'])})
expected = expected.T if axis in {1, 'columns'} else expected
assert_frame_equal(result, expected)
# dict input with lists with multiple
func = OrderedDict([(name1, ['mean', 'sum']), (name2, ['sum', 'max'])])
result = float_frame.agg(func, axis=axis)
expected = DataFrame(OrderedDict([
(name1, Series([float_frame.loc(other_axis)[name1].mean(),
float_frame.loc(other_axis)[name1].sum()],
index=['mean', 'sum'])),
(name2, Series([float_frame.loc(other_axis)[name2].sum(),
float_frame.loc(other_axis)[name2].max()],
index=['sum', 'max'])),
]))
expected = expected.T if axis in {1, 'columns'} else expected
assert_frame_equal(result, expected)
    def test_nuisance_columns(self):
# GH 15015
df = DataFrame({'A': [1, 2, 3],
'B': [1., 2., 3.],
'C': ['foo', 'bar', 'baz'],
'D': pd.date_range('20130101', periods=3)})
result = df.agg('min')
expected = Series([1, 1., 'bar', pd.Timestamp('20130101')],
index=df.columns)
assert_series_equal(result, expected)
result = df.agg(['min'])
expected = DataFrame([[1, 1., 'bar', pd.Timestamp('20130101')]],
index=['min'], columns=df.columns)
assert_frame_equal(result, expected)
result = df.agg('sum')
expected = Series([6, 6., 'foobarbaz'],
index=['A', 'B', 'C'])
assert_series_equal(result, expected)
result = df.agg(['sum'])
expected = DataFrame([[6, 6., 'foobarbaz']],
index=['sum'], columns=['A', 'B', 'C'])
assert_frame_equal(result, expected)
def test_non_callable_aggregates(self):
# GH 16405
# 'size' is a property of frame/series
# validate that this is working
df = DataFrame({'A': [None, 2, 3],
'B': [1.0, np.nan, 3.0],
'C': ['foo', None, 'bar']})
# Function aggregate
result = df.agg({'A': 'count'})
expected = Series({'A': 2})
assert_series_equal(result, expected)
# Non-function aggregate
result = df.agg({'A': 'size'})
expected = Series({'A': 3})
assert_series_equal(result, expected)
# Mix function and non-function aggs
result1 = df.agg(['count', 'size'])
result2 = df.agg({'A': ['count', 'size'],
'B': ['count', 'size'],
'C': ['count', 'size']})
expected = pd.DataFrame({'A': {'count': 2, 'size': 3},
'B': {'count': 2, 'size': 3},
'C': {'count': 2, 'size': 3}})
assert_frame_equal(result1, result2, check_like=True)
assert_frame_equal(result2, expected, check_like=True)
# Just functional string arg is same as calling df.arg()
result = df.agg('count')
expected = df.count()
assert_series_equal(result, expected)
# Just a string attribute arg same as calling df.arg
result = df.agg('size')
expected = df.size
assert result == expected
@pytest.mark.parametrize("df, func, expected", chain(
_get_cython_table_params(
DataFrame(), [
('sum', Series()),
('max', Series()),
('min', Series()),
('all', Series(dtype=bool)),
('any', Series(dtype=bool)),
('mean', Series()),
('prod', Series()),
('std', Series()),
('var', Series()),
('median', Series()),
]),
_get_cython_table_params(
DataFrame([[np.nan, 1], [1, 2]]), [
('sum', Series([1., 3])),
('max', Series([1., 2])),
('min', Series([1., 1])),
('all', Series([True, True])),
('any', Series([True, True])),
('mean', Series([1, 1.5])),
('prod', Series([1., 2])),
('std', Series([np.nan, 0.707107])),
('var', Series([np.nan, 0.5])),
('median', Series([1, 1.5])),
]),
))
def test_agg_cython_table(self, df, func, expected, axis):
# GH 21224
# test reducing functions in
# pandas.core.base.SelectionMixin._cython_table
result = df.agg(func, axis=axis)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("df, func, expected", chain(
_get_cython_table_params(
DataFrame(), [
('cumprod', DataFrame()),
('cumsum', DataFrame()),
]),
_get_cython_table_params(
DataFrame([[np.nan, 1], [1, 2]]), [
('cumprod', DataFrame([[np.nan, 1], [1., 2.]])),
('cumsum', DataFrame([[np.nan, 1], [1., 3.]])),
]),
))
def test_agg_cython_table_transform(self, df, func, expected, axis):
# GH 21224
# test transforming functions in
# pandas.core.base.SelectionMixin._cython_table (cumprod, cumsum)
result = df.agg(func, axis=axis)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("df, func, expected", _get_cython_table_params(
DataFrame([['a', 'b'], ['b', 'a']]), [
['cumprod', TypeError],
]),
)
def test_agg_cython_table_raises(self, df, func, expected, axis):
# GH 21224
with pytest.raises(expected):
df.agg(func, axis=axis)
@pytest.mark.parametrize("num_cols", [2, 3, 5])
def test_frequency_is_original(self, num_cols):
# GH 22150
index = pd.DatetimeIndex(["1950-06-30", "1952-10-24", "1953-05-29"])
original = index.copy()
df = DataFrame(1, index=index, columns=range(num_cols))
df.apply(lambda x: x)
assert index.freq == original.freq
| bsd-3-clause |
penny4860/object-detector | object_detector/utils.py | 1 | 3338 | #-*- coding: utf-8 -*-
import cv2
import sklearn.feature_extraction.image as skimg
def crop_bb(image, bb, padding=10, dst_size=(32, 32)):
"""Crop patches from an image with desired bounding box.
Parameters
----------
image : array, shape (n_rows, n_cols, n_channels) or (n_rows, n_cols)
Input image to crop.
bb : tuple, (y1, y2, x1, x2)
Desired bounding box.
    padding : int
        Number of pixels of margin added around the bounding box before cropping.
    dst_size : tuple, (h_size, w_size)
        Desired size of the returned patch.
Returns
----------
patches : array, shape of dst_size
Examples
--------
>>> from skimage import data
>>> img = data.camera() # Get Sample Image
>>> patch = crop_bb(img, (0,10, 10, 20), 2, (6,6))
>>> patch
array([[157, 157, 158, 157, 157, 158],
[158, 158, 158, 158, 158, 156],
[157, 158, 158, 157, 158, 156],
[158, 158, 158, 158, 158, 156],
[158, 156, 155, 155, 157, 155],
[157, 155, 156, 156, 156, 152]], dtype=uint8)
"""
h = image.shape[0]
w = image.shape[1]
(y1, y2, x1, x2) = bb
(x1, y1) = (max(x1 - padding, 0), max(y1 - padding, 0))
(x2, y2) = (min(x2 + padding, w), min(y2 + padding, h))
patch = image[y1:y2, x1:x2]
    # Caution: dst_size is ordered (y, x), i.e. (rows, cols), but cv2.resize() expects its size argument as (x, y), i.e. (width, height).
desired_ysize = dst_size[0]
desired_xsize = dst_size[1]
patch = cv2.resize(patch, (desired_xsize, desired_ysize), interpolation=cv2.INTER_AREA)
return patch
def crop_random(image, dst_size=(32, 32), n_patches=5):
"""Randomly crop patches from an image as desired size.
Parameters
----------
image : array, shape (n_rows, n_cols, n_channels) or (n_rows, n_cols)
Input image to crop.
    dst_size : tuple, (h_size, w_size)
        Desired size of each cropped patch.
    n_patches : int
        Desired number of patches to crop.
Returns
----------
    patches : array, shape (n_patches, n_rows, n_cols, n_channels) or (n_patches, n_rows, n_cols)
Examples
--------
>>> import numpy as np
>>> import sklearn.feature_extraction.image as skimg
>>> one_image = np.arange(100).reshape((10, 10))
>>> patches = crop_random(one_image, (5,5), 2)
>>> patches.shape
(2L, 5L, 5L)
"""
patches = skimg.extract_patches_2d(image,
dst_size,
max_patches=n_patches)
return patches
def get_file_id(filename):
"""Get file id from filename which has "($var)_($id).($extension)" format.
($var) and ($extension) can be allowed anything format.
Parameters
----------
filename : str
Input filename to extract id
Returns
----------
file_id : str
($id) from "($var)_($id).($extension)" format
Examples
--------
>>> filename = "C:\Windows\System32\cmd.exe\image_0122.jpg"
>>> get_file_id(filename)
'0122'
"""
file_id = filename[filename.rfind("_") + 1:filename.rfind(".")]
return file_id
if __name__ == "__main__":
import doctest
doctest.testmod()
| mit |
yavalvas/yav_com | build/matplotlib/doc/mpl_examples/animation/histogram.py | 7 | 1682 | """
This example shows how to use a path patch to draw a bunch of
rectangles for an animated histogram
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.path as path
import matplotlib.animation as animation
fig, ax = plt.subplots()
# histogram our data with numpy
data = np.random.randn(1000)
n, bins = np.histogram(data, 100)
# get the corners of the rectangles for the histogram
left = np.array(bins[:-1])
right = np.array(bins[1:])
bottom = np.zeros(len(left))
top = bottom + n
nrects = len(left)
# here comes the tricky part -- we have to set up the vertex and path
# codes arrays using moveto, lineto and closepoly
# for each rect: 1 for the MOVETO, 3 for the LINETO, 1 for the
# CLOSEPOLY; the vert for the closepoly is ignored but we still need
# it to keep the codes aligned with the vertices
nverts = nrects*(1+3+1)
verts = np.zeros((nverts, 2))
codes = np.ones(nverts, int) * path.Path.LINETO
codes[0::5] = path.Path.MOVETO
codes[4::5] = path.Path.CLOSEPOLY
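# each rectangle occupies 5 consecutive slots: a MOVETO at the bottom-left corner,
# LINETOs through the top-left, top-right and bottom-right corners, and a CLOSEPOLY
# whose vertex is ignored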
verts[0::5,0] = left
verts[0::5,1] = bottom
verts[1::5,0] = left
verts[1::5,1] = top
verts[2::5,0] = right
verts[2::5,1] = top
verts[3::5,0] = right
verts[3::5,1] = bottom
barpath = path.Path(verts, codes)
patch = patches.PathPatch(barpath, facecolor='green', edgecolor='yellow', alpha=0.5)
ax.add_patch(patch)
ax.set_xlim(left[0], right[-1])
ax.set_ylim(bottom.min(), top.max())
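# the PathPatch above keeps a reference to `verts`, so updating the array in place
# inside animate() is enough to move the bars on each frame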
def animate(i):
# simulate new data coming in
data = np.random.randn(1000)
n, bins = np.histogram(data, 100)
top = bottom + n
verts[1::5,1] = top
verts[2::5,1] = top
ani = animation.FuncAnimation(fig, animate, 100, repeat=False)
plt.show()
| mit |
ilanfri/StatsML | bootstrap_general.py | 1 | 6589 | #!/usr/bin/python
# IMPLEMENTS THE CLASSICAL BOOTSTRAP METHOD FOR NON-PARAMETRIC ESTIMATION
# TESTS THE IMPLEMENTATION ON A SMALL DATA SAMPLE
import numpy as np
import numpy.random as npr
#import pylab
import matplotlib.pyplot as plt
import scipy
import scikits.bootstrap as btstrp
import pandas as pan
import os, shutil
#from pandas.tools.plotting import bootstrap_plot
from scipy import stats
#import pickle_csv as pcsv
def bootstrap(data, num_samples, statistic, alpha):
"""Returns the results from num_samples bootstrap samples for an input test statistic, its standard deviation, and its 100*(1-alpha)% confidence level interval."""
    # Generate index arrays for the required number of resamplings with replacement
idx = npr.randint(0, len(data), (num_samples, len(data)))
    # Generate the multiple resampled data sets from the original one
samples = data[idx]
    # Apply the given 'statistic' function to each resampled data set and sort the resulting statistics in increasing order.
stats = np.sort(statistic(samples, 1))
stat = stats.mean()
# Return the value of the computed statistic at the upper and lower percentiles specified by the alpha parameter given. These are, by definition, the boundaries of the Confidence Interval for that value of alpha. E.g. alpha=0.05 ==> CI 95%
low_ci = stats[int((alpha / 2.0) * num_samples)]
high_ci = stats[int((1 - alpha / 2.0) * num_samples)]
    #sd = np.std(stat)
    # Use Bessel's correction (ddof=1) for the unbiased standard deviation of the bootstrap statistics:
    sd = np.std(stats, ddof=1)
return stat, sd, low_ci, high_ci
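# Illustrative usage sketch (hypothetical sample values): bootstrap the mean with 1000
# resamples and a 95% confidence interval.
#   sample = np.array([2.3, 1.9, 3.1, 2.8, 2.2])
#   mean, sd, low, high = bootstrap(sample, 1000, np.mean, 0.05)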
if __name__ == '__main__':
filename = '2008_test.csv'
na_symbol = 'NA'
# Read input data file in, create serialised copy if none exists
#pklfilename=(str(filename).replace('.csv',''))+'.pkl'
pklfilename = (str(filename).replace('.csv', '')) + '.h5'
rootdir = os.getcwd()
filepath = os.path.join(rootdir, pklfilename)
if (os.path.exists(filepath) == True):
#data=pan.read_pickle(pklfilename)
data = pan.read_hdf(pklfilename, 'data')
else:
data = pan.read_csv(filename, na_values=na_symbol)
#data.to_pickle(pklfilename)
data.to_hdf(pklfilename, 'data', mode='w')
# Choose two arbitrary (mutually exclusive) subsets of data for statistical comparison
testdist1 = data["ArrDelay"].dropna()[data["Distance"] < 1000]
testdist2 = data["ArrDelay"].dropna()[(data["Distance"] >= 1000) &
(data["Distance"] < 2000)]
#testdist2=data[data["Distance"]>=1000]["ArrDelay"]
z, pmww = stats.ranksums(testdist1, testdist2)
print "\n\nThe MWW RankSum p-value for flights less than 1000 miles vs [1000,2000) miles:\n{0:.3g}\n".format(
pmww)
#testdist3=data[data["Distance"]>2000]["ArrDelay"]
testdist3 = data["ArrDelay"].dropna()[data["Distance"] >= 2000]
f, panova = stats.f_oneway(testdist1, testdist2, testdist3)
print "One-way ANOVA p-value for <1000, [1000,2000), and >= 2000 miles flight data sets:\n{0:.3g}".format(
panova)
nbootstraps = 10000
alpha = 0.05
    # Observed sample mean and standard error of the mean
    print "\nOriginal-sample mean:\n{0:.3g}".format(testdist3.mean())
    print "Original-sample standard error of the mean:\n{0:.3g}".format(
        stats.sem(testdist3))
# Compute bootstrap mean, SD of mean, and CI of mean
meanbtstrp, meanbtstrpsd, lowmean, highmean = bootstrap(
np.array(testdist3), nbootstraps, np.mean, alpha)
print "\nBootstrap mean:\n{0:.3g}".format(meanbtstrp)
print "Bootrstrap SD of the mean:\n{0:.3g}".format(meanbtstrpsd)
print "\nBootstrapped 95% confidence intervals on the mean:\n[{0:.3g}, {1:.3g}]".format(
lowmean, highmean)
# Use Scipy's implementation of bootstrapping to do the same
CIs = btstrp.ci(data=testdist3,
statfunction=np.mean,
n_samples=nbootstraps,
alpha=alpha)
print "Scipy Bootstrapped 95% confidence intervals on the mean:\n[{0:.3g}, {1:.3g}]\n\n".format(
CIs[0], CIs[1])
# Options to deal with NA entries:
# Drop all of the entries which contain a NA: data.dropna()
# Drop NA entries in a particular column: data["Distance"].dropna()
# Replace NA with a given value: data.fillna(0.0)["Distance"]
# Fields are:
# Year,Month,DayofMonth,DayOfWeek,DepTime,CRSDepTime,ArrTime,CRSArrTime,UniqueCarrier,FlightNum,TailNum,ActualElapsedTime,CRSElapsedTime,AirTime,ArrDelay,DepDelay,Origin,Dest,Distance,TaxiIn,TaxiOut,Cancelled,CancellationCode,Diverted,CarrierDelay,WeatherDelay,NASDelay,SecurityDelay,LateAircraftDelay
# # make plots
# plt.figure(figsize=(8,4))
# plt.subplot(121)
# plt.hist(x, 50, histtype='step')
# plt.title('Histogram of data')
# plt.subplot(122)
# plt.plot([-0.03,0.03], [np.mean(x), np.mean(x)], 'r', linewidth=2)
# plt.scatter(0.1*(npr.random(len(x))-0.5), x)
# plt.plot([0.19,0.21], [lowmean, lowmean], 'r', linewidth=2)
# plt.plot([0.19,0.21], [highmean, highmean], 'r', linewidth=2)
# plt.plot([0.2,0.2], [lowmean, highmean], 'r', linewidth=2, label="Mean CI")
# # The SD interval
# plt.plot([0.25,0.27], [np.mean(x)-np.std(x), np.mean(x)-np.std(x)], 'g', linewidth=2)
# plt.plot([0.25,0.27], [np.mean(x)+np.std(x), np.mean(x)+np.std(x)], 'g', linewidth=2)
# plt.plot([0.26,0.26], [np.mean(x)-np.std(x), np.mean(x)+np.std(x)], 'g', linewidth=2)
# # CI bars on each of the downwards and upwards 1 SD bands about the mean
# plt.plot([0.29,0.31], [np.mean(x)-np.std(x)-(np.std(x)-lowsd), np.mean(x)-np.std(x)-(np.std(x)-lowsd)], 'g', linewidth=2)
# plt.plot([0.29,0.31], [np.mean(x)-np.std(x)+(highsd-np.std(x)), np.mean(x)-np.std(x)+(highsd-np.std(x))], 'g', linewidth=2)
# plt.plot([0.3,0.3], [np.mean(x)-np.std(x)-(np.std(x)-lowsd), np.mean(x)-np.std(x)+(highsd-np.std(x))], 'g', linewidth=2)
# plt.plot([0.29,0.31], [np.mean(x)+np.std(x)-(np.std(x)-lowsd), np.mean(x)+np.std(x)-(np.std(x)-lowsd)], 'g', linewidth=2)
# plt.plot([0.29,0.31], [np.mean(x)+np.std(x)+(highsd-np.std(x)), np.mean(x)+np.std(x)+(highsd-np.std(x))], 'g', linewidth=2)
# plt.plot([0.3,0.3], [np.mean(x)+np.std(x)-(np.std(x)-lowsd), np.mean(x)+np.std(x)+(highsd-np.std(x))], 'g', linewidth=2, label="SD CI")
# plt.xlim([-0.2, 0.35])
# plt.title('Bootstrap 95% CIs')
# plt.legend()
# plt.show()
# #plt.savefig('examples/boostrap.png')
| bsd-3-clause |
fridiculous/django-estimators | estimators/tests/test_estimators.py | 1 | 3377 |
import pytest
from django.core.exceptions import ValidationError
from estimators.models.estimators import Estimator
from estimators.tests.factories import EstimatorFactory
@pytest.mark.django_db
class TestEstimator():
def test_estimator_persistance_without_factory(self):
m = Estimator(estimator='new string', description='another object')
assert m.object_file.storage.exists(m.object_file.path) == False
assert m.is_file_persisted == False
m.save()
assert m.object_file.storage.exists(m.object_file.path) == True
assert m.is_file_persisted == True
def test_object_hash_with_factory(self):
m = EstimatorFactory(estimator=object)
assert m.estimator == object
del m
n = Estimator.objects.filter(estimator=object).first()
        # sklearn hash of the builtin `object` = 'd9c9f286391652b89978a6961b52b674'
assert n.object_hash == 'd9c9f286391652b89978a6961b52b674'
# assert loaded after calling n.estimator
assert n.estimator == object
assert Estimator._compute_hash(
object) == 'd9c9f286391652b89978a6961b52b674'
def test_get_or_create(self):
m, created = Estimator.objects.get_or_create(estimator='new_string_as_object')
m.save()
assert m.estimator == 'new_string_as_object'
assert created == True
n, created = Estimator.objects.get_or_create(estimator='new_string_as_object')
assert m == n
assert created == False
def test_update_or_create(self):
e = 'estimator_obj'
m, created = Estimator.objects.update_or_create(estimator=e)
m.save()
assert m.estimator == e
assert created == True
n, created = Estimator.objects.update_or_create(estimator=e)
assert m == n
assert created == False
def test_create_from_file_with_factory(self):
obj = "{'key': 'value'}"
m = EstimatorFactory(estimator=obj)
object_hash = m.object_hash
file_path = m.object_file.name
del m
m = Estimator.create_from_file(file_path)
assert m.estimator == obj
assert m.object_hash == object_hash
assert m.is_file_persisted == True
def test_update_estimator_fail(self):
m = Estimator(estimator='uneditable_object')
m.estimator = 'object_edited_before_persistance'
m.save()
m.estimator = 'object_edited_after_persistance'
with pytest.raises(ValidationError):
m.save()
def test_hashing_func(self):
object_hash = Estimator._compute_hash('abcd')
assert object_hash == '3062a9e3345c129799bd2c1603c2e966'
def test_hash_without_estimator_fail(self):
m = Estimator()
m.object_hash = 'randomly set hash'
with pytest.raises(ValidationError):
m.save()
def test_wrong_hash_fail(self):
m = Estimator(estimator='unique_object')
m.object_hash = 'randomly set hash'
with pytest.raises(ValidationError):
m.save()
def test_prevent_double_file_save(self):
EstimatorFactory(estimator='yes')
hash_of_yes = 'b635788f4b614e8469b470b8e9f68174'
e = Estimator.objects.get(object_hash=hash_of_yes)
assert e.is_file_persisted == True
e.save()
assert e.object_file.name.endswith(hash_of_yes)
| mit |
MaxNoe/pyhexgrid | hexgrid/plotting.py | 1 | 1203 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import RegularPolygon
def plot_hexagons(
hexpoints,
ax=None,
facecolor=None,
edgecolor=None,
linewidth=None,
key=None,
cmap=None,
):
ax = ax or plt.gca()
x, y = hexpoints.cartesian
if hexpoints.orientation == 'pointy_top':
orientation = 0
else:
orientation = np.pi / 6
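    # RegularPolygon's `orientation` is a rotation in radians; the pi/6 (30 degree)
    # twist turns the default pointy-top hexagon into a flat-top one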
hexagons = [
RegularPolygon(xy, 6, radius=1, orientation=orientation)
for xy in zip(x, y)
]
collection = PatchCollection(hexagons)
if facecolor is not None:
collection.set_facecolor(facecolor)
if edgecolor is not None:
collection.set_edgecolor(edgecolor)
if linewidth is not None:
collection.set_linewidth(linewidth)
if key is not None:
collection.set_array(hexpoints.data[key])
collection.set_cmap(plt.get_cmap(cmap))
ax.add_collection(collection)
ax.set_xlim(np.min(x) - 1, np.max(x) + 1)
ax.set_ylim(np.min(y) - 1, np.max(y) + 1)
ax.set_aspect(1)
plt.draw_if_interactive()
return collection
| mit |
embotech/forcesnlp-examples | quadcopter/ipopt/quadcopter.py | 1 | 10839 | import sys
sys.path.append(r"/home/andrea/casadi-py27-np1.9.1-v2.4.2")
from casadi import *
from numpy import *
from scipy.linalg import *
import matplotlib
matplotlib.use('Qt4Agg')
import matplotlib.pyplot as plt
from math import atan2, asin
import pdb
N = 5 # Control discretization
T = 1.0 # End time
nx = 17
nu = 4
T_sim = 8.0
N_sim = int(ceil(T_sim/(T/N)))
# Declare variables (use scalar graph)
u = SX.sym("u",nu) # control
x = SX.sym("x",nx) # states
# Dynamics definition
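# Physical/model parameters (interpretation is a best guess from the equations below:
# rho = air density, A = rotor disk area, Cl/Cd = lift/drag coefficients, m = mass,
# g = gravity, L = arm length, J1-J3 = body inertias, alpha = quaternion-normalisation
# gain; Jp, xi and gain are defined but not used in the ODE below)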
rho = 1.23
A = 0.1
Cl = 0.25
Cd = 0.3*Cl
m = 10
g = 9.81
L = 0.5
Jp = 1e-2
xi = 1e-2
J1 = 0.25
J2 = 0.25
J3 = 1
gain = 1e-4
alpha = 0.0
P1 = x[0]
P2 = x[1]
P3 = x[2]
V1 = x[3]
V2 = x[4]
V3 = x[5]
q1 = x[6]
q2 = x[7]
q3 = x[8]
q4 = x[9]
Omega1 = x[10]
Omega2 = x[11]
Omega3 = x[12]
W1 = x[13]
W2 = x[14]
W3 = x[15]
W4 = x[16]
rW1 = u[0]
rW2 = u[1]
rW3 = u[2]
rW4 = u[3]
ode = vertcat([ V1,\
V2,\
V3,\
(A*Cl*rho*(2*q1*q3 + 2*q2*q4)*(W1*W1 + W2*W2 + W3*W3 + W4*W4))/(2*m),\
-(A*Cl*rho*(2*q1*q2 - 2*q3*q4)*(W1*W1 + W2*W2 + W3*W3 + W4*W4))/(2*m),\
(A*Cl*rho*(W1*W1 + W2*W2 + W3*W3 + W4*W4)*(q1*q1 - q2*q2 - q3*q3 + q4*q4))/(2*m) - g,\
- (Omega1*q2)/2 - (Omega2*q3)/2 - (Omega3*q4)/2 - (alpha*q1*(q1*q1 + q2*q2 + q3*q3 + q4*q4 - 1))/(q1*q1 + q2*q2 + q3*q3 + q4*q4),\
(Omega1*q1)/2 - (Omega3*q3)/2 + (Omega2*q4)/2 - (alpha*q2*(q1*q1 + q2*q2 + q3*q3 + q4*q4 - 1))/(q1*q1 + q2*q2 + q3*q3 + q4*q4),\
(Omega2*q1)/2 + (Omega3*q2)/2 - (Omega1*q4)/2 - (alpha*q3*(q1*q1 + q2*q2 + q3*q3 + q4*q4 - 1))/(q1*q1 + q2*q2 + q3*q3 + q4*q4),\
(Omega3*q1)/2 - (Omega2*q2)/2 + (Omega1*q3)/2 - (alpha*q4*(q1*q1 + q2*q2 + q3*q3 + q4*q4 - 1))/(q1*q1 + q2*q2 + q3*q3 + q4*q4),\
(J3*Omega2*Omega3 - J2*Omega2*Omega3 + (A*Cl*L*rho*(W2*W2 - W4*W4))/2)/J1,\
-(J3*Omega1*Omega3 - J1*Omega1*Omega3 + (A*Cl*L*rho*(W1*W1 - W3*W3))/2)/J2,\
(J2*Omega1*Omega2 - J1*Omega1*Omega2 + (A*Cd*rho*(W1*W1 - W2*W2 + W3*W3 - W4*W4))/2)/J3,\
rW1,\
rW2,\
rW3,\
rW4])
f = SXFunction([x,u],[ode])
f.init()
# RK4 with M steps
U = MX.sym("U",nu)
X = MX.sym("X",nx)
M = 1 ; DT = T/(N*M)
XF = X
QF = 0
for j in range(M):
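    # one classical RK4 step: four stage evaluations of the right-hand side f,
    # combined with weights 1/6, 2/6, 2/6, 1/6 over a sub-interval of length DT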
[k1] = f([XF, U])
[k2] = f([XF + DT/2 * k1, U])
[k3] = f([XF + DT/2 * k2, U])
[k4] = f([XF + DT * k3, U])
XF += DT/6*(k1 + 2*k2 + 2*k3 + k4)
F = MXFunction([X,U],[XF])
F.init()
# Formulate NLP (use matrix graph)
nv = nu*N + nx*(N+1)
v = MX.sym("v", nv)
# Get the state for each shooting interval
xk = [v[(nx + nu)*k : (nx + nu)*k + nx] for k in range(N+1)]
# Get the control for each shooting interval
uk = [v[(nx + nu)*k + nx:(nx + nu)*k + nx + nu] for k in range(N)]
# Variable bounds and initial guess
vmin = -100*ones(nv)
vmax = 100*ones(nv)
#vmin[nu*N + nx*(N):] = -5*ones(nx)
#vmax[nu*N + nx*(N):] = 5*ones(nx)
v0 = zeros(nv)
# Control bounds
max_rate = 40
vmin[0::nx+nu] = -10
vmin[1::nx+nu] = -10
vmin[2::nx+nu] = -10
vmin[3::nx+nu] = -100
vmin[4::nx+nu] = -100
vmin[5::nx+nu] = -100
vmin[6::nx+nu] = -5
vmin[7::nx+nu] = -5
vmin[8::nx+nu] = -5
vmin[9::nx+nu] = -5
vmin[10::nx+nu] = -40
vmin[11::nx+nu] = -40
vmin[12::nx+nu] = -40
vmin[13::nx+nu] = -50
vmin[14::nx+nu] = -50
vmin[15::nx+nu] = -50
vmin[16::nx+nu] = -50
vmin[nx+0::nx+nu] = -max_rate
vmin[nx+1::nx+nu] = -max_rate
vmin[nx+2::nx+nu] = -max_rate
vmin[nx+3::nx+nu] = -max_rate
vmax[0::nx+nu] = 10
vmax[1::nx+nu] = 10
vmax[2::nx+nu] = 10
vmax[3::nx+nu] = 100
vmax[4::nx+nu] = 100
vmax[5::nx+nu] = 100
vmax[6::nx+nu] = 5
vmax[7::nx+nu] = 5
vmax[8::nx+nu] = 5
vmax[9::nx+nu] = 5
vmax[10::nx+nu] = 40
vmax[11::nx+nu] = 40
vmax[12::nx+nu] = 40
vmax[13::nx+nu] = 50
vmax[14::nx+nu] = 50
vmax[15::nx+nu] = 50
vmax[16::nx+nu] = 50
vmax[nx+0::nx+nu] = max_rate
vmax[nx+1::nx+nu] = max_rate
vmax[nx+2::nx+nu] = max_rate
vmax[nx+3::nx+nu] = max_rate
initial_angle_rad = 3.0
# Initial condition
hover_omega = 39.939
x0 = array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, cos(initial_angle_rad/2.0), sin(initial_angle_rad/2.0), 0.0, 0.0, 0.0, 0.0, 0.0, hover_omega, hover_omega, hover_omega, hover_omega])
for i in range(nx):
vmin[i] = vmax[i] = v0[i] = x0[i]
# Initial solution guess
v0 = 1.0*ones(nv)
# for k in range(N):
# v0[(nx + nu)*k + nx:(nx + nu)*k + nx + nu] = [40, 40, 40, 40]
# Constraint function with bounds
g = []; gmin = []; gmax = []
# Objective function
J=0.0
# Build up a graph of integrator calls
w_pos = 50.0
w_vel = 5.0
w_att = 20.00
w_omega = 5.0
w_in = 0.01
w_rate = 0.01
H_end_diag = array([w_pos, w_pos, w_pos, w_vel, w_vel, w_vel, w_att, w_att, w_att, w_att, w_omega, w_omega, w_omega, w_in, w_in, w_in, w_in])
H_diag = array([w_pos, w_pos, w_pos, w_vel, w_vel, w_vel, w_att, w_att, w_att, w_att, w_omega, w_omega, w_omega, w_in, w_in, w_in, w_in, w_rate, w_rate, w_rate, w_rate])
for k in range(N):
# Call the integrator
[xf] = F([xk[k],uk[k]])
# position
J+= 1.0/2.0*H_diag[0]*pow(xk[k][0],2)
J+= 1.0/2.0*H_diag[1]*pow(xk[k][1],2)
J+= 1.0/2.0*H_diag[2]*pow(xk[k][2],2)
# velocities
J+= 1.0/2.0*H_diag[3]*pow(xk[k][3],2)
J+= 1.0/2.0*H_diag[4]*pow(xk[k][4],2)
J+= 1.0/2.0*H_diag[5]*pow(xk[k][5],2)
# attitude
J+= 1.0/2.0*H_diag[6]*pow(xk[k][6] - 1.0,2)
J+= 1.0/2.0*H_diag[7]*pow(xk[k][7],2)
J+= 1.0/2.0*H_diag[8]*pow(xk[k][8],2)
J+= 1.0/2.0*H_diag[9]*pow(xk[k][9],2)
# omega
J+= 1.0/2.0*H_diag[10]*pow(xk[k][10],2)
J+= 1.0/2.0*H_diag[11]*pow(xk[k][11],2)
J+= 1.0/2.0*H_diag[12]*pow(xk[k][12],2)
# inputs
J+= 1.0/2.0*H_diag[13]*pow(xk[k][13] - hover_omega,2)
J+= 1.0/2.0*H_diag[14]*pow(xk[k][14] - hover_omega,2)
J+= 1.0/2.0*H_diag[15]*pow(xk[k][15] - hover_omega,2)
J+= 1.0/2.0*H_diag[16]*pow(xk[k][16] - hover_omega,2)
for j in range(nu):
J+= 1.0/2.0*H_diag[nx+j]*pow(uk[k][j],2)
g.append(xf - xk[k+1])
gmin.append(zeros(nx))
gmax.append(zeros(nx))
# Terminal cost
k = N
# position
J+= 1.0/2.0*H_end_diag[0]*pow(xk[k][0],2)
J+= 1.0/2.0*H_end_diag[1]*pow(xk[k][1],2)
J+= 1.0/2.0*H_end_diag[2]*pow(xk[k][2],2)
# velocities
J+= 1.0/2.0*H_end_diag[3]*pow(xk[k][3],2)
J+= 1.0/2.0*H_end_diag[4]*pow(xk[k][4],2)
J+= 1.0/2.0*H_end_diag[5]*pow(xk[k][5],2)
# attitude
J+= 1.0/2.0*H_end_diag[6]*pow(xk[k][6] - 1.0,2)
J+= 1.0/2.0*H_end_diag[7]*pow(xk[k][7],2)
J+= 1.0/2.0*H_end_diag[8]*pow(xk[k][8],2)
J+= 1.0/2.0*H_end_diag[9]*pow(xk[k][9],2)
# omega
J+= 1.0/2.0*H_end_diag[10]*pow(xk[k][10],2)
J+= 1.0/2.0*H_end_diag[11]*pow(xk[k][11],2)
J+= 1.0/2.0*H_end_diag[12]*pow(xk[k][12],2)
# inputs
J+= 1.0/2.0*H_end_diag[13]*pow(xk[k][13] - hover_omega,2)
J+= 1.0/2.0*H_end_diag[14]*pow(xk[k][14] - hover_omega,2)
J+= 1.0/2.0*H_end_diag[15]*pow(xk[k][15] - hover_omega,2)
J+= 1.0/2.0*H_end_diag[16]*pow(xk[k][16] - hover_omega,2)
# Concatenate constraints
g = vertcat(g)
gmin = concatenate(gmin)
gmax = concatenate(gmax)
# Gauss-Newton hessian:
H = block_diag(H_diag)
H_end = block_diag(H_end_diag)
Hgn = kron(eye(N),H)
Hgn = block_diag(Hgn,H_end)
h=SXFunction(hessLagIn(), hessLagOut(hess=Hgn))
# Create NLP solver instance
opts = {'jit':True,"jit_options":{"flags":['-O0']}}
nlp = MXFunction('nlp',nlpIn(x=v),nlpOut(f=J,g=g),opts)
# nlp = MXFunction('nlp',nlpIn(x=v),nlpOut(f=J,g=g))
solver = NlpSolver("nlp_solver", "ipopt", nlp)
# nlp = MXFunction(nlpIn(x=v),nlpOut(f=J,g=g))
# solver = NlpSolver("ipopt", nlp)
#solver.setOption("tol",1.0e-4)
#solver.setOption("constr_viol_tol",1.0e-4)
#solver.setOption("compl_inf_tol",1.0e-4)
#solver.setOption("dual_inf_tol",1.0e-4)
#solver.setOption("accept_every_trial_step","yes")
solver.setOption("limited_memory_update_type","bfgs")
solver.setOption("print_level",5)
solver.setOption("output_file","ipopt_log.txt")
# solver.setOption("linear_solver","ma57")
solver.init()
# Set bounds and initial guess
solver.setInput(v0, "x0")
solver.setInput(vmin, "lbx")
solver.setInput(vmax, "ubx")
solver.setInput(gmin, "lbg")
solver.setInput(gmax, "ubg")
# Simulation loop
X = zeros((nx,N_sim))
U = zeros((nu,N_sim))
full_time = zeros(N_sim)
time = zeros(N_sim)
iter = zeros(N_sim)
#pdb.set_trace()
for i in range(N_sim):
# Solve the problem
solver.evaluate()
stat = solver.getStats()
full_time[i] = stat["t_mainloop.proc"]
time[i] = stat["t_mainloop.proc"] - stat["t_eval_f.proc"] - stat["t_eval_g.proc"] - stat["t_eval_grad_f.proc"] - stat["t_eval_jac_g.proc"] - stat["t_eval_h.proc"]
iter[i] = stat["iter_count"]
# Retrieve the solution
v_opt = solver.getOutput("x")
for j in range(nx):
X[j,i] = v_opt[j]
for j in range(nu):
U[j,i] = v_opt[nx+j]
# Update initial condition
for j in range(nx):
vmin[j] = vmax[j] = v_opt[nx+nu+j]
solver.init()
solver.setInput(v0, "x0")
solver.setInput(vmin, "lbx")
solver.setInput(vmax, "ubx")
solver.setInput(gmin, "lbg")
solver.setInput(gmax, "ubg")
# Plot the results
plt.figure(1)
plt.clf()
plt.subplot(311)
plt.plot(linspace(0,DT*M*N_sim,N_sim),X[0,:],'--')
plt.plot(linspace(0,DT*M*N_sim,N_sim),X[1,:],'--')
plt.plot(linspace(0,DT*M*N_sim,N_sim),X[2,:],'--')
plt.title("Quadcopter - Ipopt")
plt.xlabel('time')
plt.legend(['x','y','z'])
plt.grid()
# Convert quaternions to Euler angles (rad)
angles = zeros((3,N_sim))
for i in range(N_sim):
q0 = X[6,i]
q1 = X[7,i]
q2 = X[8,i]
q3 = X[9,i]
angles[0,i] = atan2(2*(q0*q1 + q2*q3),(1 - 2*(q1**2 + q2**2)))
angles[1,i] = asin(2*(q0*q2 - q3*q1))
angles[2,i] = atan2(2*(q0*q3 + q1*q2),(1 - 2*(q2**2 + q3**2)))
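# Equivalent vectorized form of the quaternion -> Euler conversion above
# (illustrative only; the plotting below keeps using the `angles` array).
# Q is a (4, n) array ordered as (q0, q1, q2, q3).
def _quat_to_euler(Q):
    q0, q1, q2, q3 = Q
    phi = arctan2(2*(q0*q1 + q2*q3), 1 - 2*(q1**2 + q2**2))
    theta = arcsin(2*(q0*q2 - q3*q1))
    psi = arctan2(2*(q0*q3 + q1*q2), 1 - 2*(q2**2 + q3**2))
    return vstack((phi, theta, psi))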
plt.subplot(312)
plt.plot(linspace(0,DT*M*N_sim,N_sim),angles[0,:])
plt.plot(linspace(0,DT*M*N_sim,N_sim),angles[1,:])
plt.plot(linspace(0,DT*M*N_sim,N_sim),angles[2,:])
plt.xlabel('time')
plt.legend(['phi','theta','psi'])
plt.grid()
#plt.step(linspace(0,T,N),u_opt,'-.')
plt.subplot(313)
plt.step(linspace(0,DT*M*N_sim,N_sim),U[0,:])
plt.step(linspace(0,DT*M*N_sim,N_sim),U[1,:])
plt.step(linspace(0,DT*M*N_sim,N_sim),U[2,:])
plt.step(linspace(0,DT*M*N_sim,N_sim),U[3,:])
plt.xlabel('time')
plt.legend(['w1','w2','w3','w4'])
plt.grid()
#plt.legend(['force'])
plt.figure(2)
plt.subplot(211)
plt.plot(linspace(0,DT*M*N_sim,N_sim),time)
#plt.plot(linspace(0,T,N_sim),full_time,'--')
plt.xlabel('time')
plt.legend(['CPU time w/o f eval','total CPU time'])
plt.grid()
plt.subplot(212)
plt.plot(linspace(0,DT*M*N_sim,N_sim),iter)
plt.xlabel('time')
plt.legend(['iter'])
plt.grid()
# Store results
savez('quadcopter_sim_ipopt.mpy', X=X, U=U, time=time, full_time=full_time, iter=iter)
savetxt('X.txt',X)
savetxt('U.txt',U)
savetxt('time_N5.txt',time)
savetxt('full_time_N5.txt',full_time)
savetxt('iter_N5.txt',iter)
plt.show()
| mit |
yade/trunk | doc/sphinx/ipython_directive.py | 2 | 18719 | # -*- coding: utf-8 -*-
"""Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives).
By default this directive assumes that your prompts are unchanged IPython ones,
but this can be customized. For example, the following code in your Sphinx
config file will configure this directive for the following input/output
prompts ``Yade [1]:`` and ``-> [1]:``::
import ipython_directive as id
id.rgxin =re.compile(r'(?:In |Yade )\[(\d+)\]:\s?(.*)\s*')
id.rgxout=re.compile(r'(?:Out| -> )\[(\d+)\]:\s?(.*)\s*')
id.fmtin ='Yade [%d]:'
id.fmtout=' -> [%d]:'
id.rc_override=dict(
prompt_in1="Yade [\#]:",
prompt_in2=" .\D..",
prompt_out=" -> [\#]:"
)
id.reconfig_shell()
import ipython_console_highlighting as ich
ich.IPythonConsoleLexer.input_prompt=
re.compile("(Yade \[[0-9]+\]: )|( \.\.\.+:)")
ich.IPythonConsoleLexer.output_prompt=
re.compile("(( -> )|(Out)\[[0-9]+\]: )|( \.\.\.+:)")
ich.IPythonConsoleLexer.continue_prompt=re.compile(" \.\.\.+:")
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
- Make sure %bookmarks used internally are removed on exit.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import object
import io
import imp
import os
import re
import shutil
import sys
import warnings
# To keep compatibility with various python versions
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Third-party
import matplotlib
import sphinx
from docutils.parsers.rst import directives
matplotlib.use('Agg')
# Our own
import IPython
from IPython.Shell import MatplotlibShell
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
sphinx_version = sphinx.__version__.split(".")
# The split is necessary for sphinx beta versions where the string is
# '6b1'
sphinx_version = tuple([int(re.split('[a-z]', x)[0])
for x in sphinx_version[:2]])
COMMENT, INPUT, OUTPUT = list(range(3))
rc_override = {}
rgxin = re.compile('In \[(\d+)\]:\s?(.*)\s*')
rgxcont = re.compile(' \.+:\s?(.*)\s*')
rgxout = re.compile('Out\[(\d+)\]:\s?(.*)\s*')
fmtin = 'In [%d]:'
fmtout = 'Out[%d]:'
fmtcont = ' .\D.:'
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part):
"""
part is a string of ipython text, comprised of at most one
    input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# we're assuming at most one decorator -- may need to
# rethink
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
#continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
#Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# echo line 'print'. The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
matchcont = rgxcont.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif matchcont: #nextline.startswith(continuation):
inputline += '\n' + matchcont.group(1) #nextline[Nc:]
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
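def _block_parser_example():
    """Illustrative sketch only (not part of the original module): what
    block_parser() returns for a minimal session, assuming the module-level
    default prompts (rgxin/rgxout) are unchanged. The result is roughly
    [(INPUT, (None, 'x = 1 + 1', '')), (OUTPUT, '2')].
    """
    part = "In [1]: x = 1 + 1\nOut[1]: 2"
    return block_parser(part)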
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self):
self.cout = io.StringIO()
IPython.Shell.Term.cout = self.cout
IPython.Shell.Term.cerr = self.cout
argv = ['-autocall', '0']
self.user_ns = {}
self.user_glocal_ns = {}
self.IP = IPython.ipmaker.make_IPython(
argv, self.user_ns, self.user_glocal_ns, embedded=True,
#shell_class=IPython.Shell.InteractiveShell,
shell_class=MatplotlibShell,
rc_override = dict(colors = 'NoColor', **rc_override))
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
# we need bookmark the current dir first so we can save
# relative to it
self.process_input_line('bookmark ipy_basedir')
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line):
"""process the input, capturing stdout"""
#print "input='%s'"%self.input
stdout = sys.stdout
sys.stdout = self.cout
#self.IP.resetbuffer()
self.IP.push(self.IP.prefilter(line, 0))
#self.IP.runlines(line)
sys.stdout = stdout
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""Process data block for INPUT token."""
decorator, input, rest = data
image_file = None
#print 'INPUT:', data
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = decorator=='@doctest' or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
input_lines = input.split('\n')
#continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
#Nc = len(continuation)
if is_savefig:
saveargs = decorator.split(' ')
filename = saveargs[1]
outfile = os.path.join('_static/%s'%filename)
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = outfile
image_directive = '\n'.join(imagerows)
# TODO: can we get "rest" from ipython
#self.process_input_line('\n'.join(input_lines))
ret = []
is_semicolon = False
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i==0:
# process the first input line
if is_verbatim:
self.process_input_line('')
else:
# only submit the line in non-verbatim mode
self.process_input_line(line)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line)
formatted_line = fmtcont.replace('\D','.'*len(str(lineno)))+line #'%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress:
if len(rest.strip()):
if is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append(rest)
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon:
ret.append(output)
self.cout.truncate(0)
return ret, input_lines, output, is_doctest, image_file
#print 'OUTPUT', output # dbg
def process_output(self, data, output_prompt,
input_lines, output, is_doctest, image_file):
"""Process data block for OUTPUT token."""
if is_doctest:
submitted = data.strip()
found = output
if found is not None:
ind = found.find(output_prompt)
if ind<0:
raise RuntimeError('output prompt="%s" does not match out line=%s'%(output_prompt, found))
found = found[len(output_prompt):].strip()
if found!=submitted:
raise RuntimeError('doctest failure for input_lines="%s" with found_output="%s" and submitted output="%s"'%(input_lines, found, submitted))
#print 'doctest PASSED for input_lines="%s" with found_output="%s" and submitted output="%s"'%(input_lines, found, submitted)
def process_comment(self, data):
"""Process data block for COMMENT token."""
if not self.is_suppress:
return [data]
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
m = rgxin.match(str(self.IP.outputcache.prompt1).strip())
lineno = int(m.group(1))
input_prompt = fmtin%lineno
output_prompt = fmtout%lineno
image_file = None
image_directive = None
# XXX - This needs a second refactor. There's too much state being
# held globally, which makes for a very awkward interface and large,
# hard to test functions. I've already broken this up at least into
# three separate processors to isolate the logic better, but this only
# serves to highlight the coupling. Next we need to clean it up...
for token, data in block:
if token==COMMENT:
out_data = self.process_comment(data)
elif token==INPUT:
out_data, input_lines, output, is_doctest, image_file= \
self.process_input(data, input_prompt, lineno)
elif token==OUTPUT:
out_data = \
self.process_output(data, output_prompt,
input_lines, output, is_doctest,
image_file)
if out_data:
ret.extend(out_data)
if image_file is not None:
self.ensure_pyplot()
command = 'plt.gcf().savefig("%s")'%image_file
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir')
self.process_input_line('cd -b ipy_basedir')
self.process_input_line(command)
self.process_input_line('cd -b ipy_thisdir')
self.cout.seek(0)
self.cout.truncate(0)
return ret, image_directive
def ensure_pyplot(self):
if self._pyplot_imported:
return
self.process_input_line('import matplotlib.pyplot as plt')
# A global instance used below. XXX: not sure why this can't be created inside
# ipython_directive itself.
shell = EmbeddedSphinxShell()
def reconfig_shell():
"""Called after setting module-level variables to re-instantiate
with the set values (since shell is instantiated first at import-time
when module variables have default values)"""
global shell
shell = EmbeddedSphinxShell()
def ipython_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine,
):
debug = ipython_directive.DEBUG
shell.is_suppress = 'suppress' in options
shell.is_doctest = 'doctest' in options
shell.is_verbatim = 'verbatim' in options
#print 'ipy', shell.is_suppress, options
parts = '\n'.join(content).split('\n\n')
lines = ['.. sourcecode:: ipython', '']
figures = []
for part in parts:
block = block_parser(part)
if len(block):
rows, figure = shell.process_block(block)
for row in rows:
lines.extend([' %s'%line for line in row.split('\n')])
if figure is not None:
figures.append(figure)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
#print lines
if len(lines)>2:
if debug:
print('\n'.join(lines))
else:
#print 'INSERTING %d lines'%len(lines)
state_machine.insert_input(
lines, state_machine.input_lines.source(0))
return []
ipython_directive.DEBUG = False
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
options = {'suppress': directives.flag,
'doctest': directives.flag,
'verbatim': directives.flag,
}
app.add_directive('ipython', ipython_directive, True, (0, 2, 0), **options)
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
.....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: np.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
""",
]
ipython_directive.DEBUG = True
#options = dict(suppress=True)
options = dict()
for example in examples:
content = example.split('\n')
ipython_directive('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__=='__main__':
test()
| gpl-2.0 |
AndKe/MAVProxy | MAVProxy/modules/lib/MacOS/backend_wxagg.py | 7 | 5884 | from __future__ import division, print_function
import matplotlib
from matplotlib.figure import Figure
from backend_agg import FigureCanvasAgg
import backend_wx # already uses wxversion.ensureMinimal('2.8')
from backend_wx import FigureManager, FigureManagerWx, FigureCanvasWx, \
FigureFrameWx, DEBUG_MSG, NavigationToolbar2Wx, error_msg_wx, \
draw_if_interactive, show, Toolbar, backend_version
import wx
class FigureFrameWxAgg(FigureFrameWx):
def get_canvas(self, fig):
return FigureCanvasWxAgg(self, -1, fig)
def _get_toolbar(self, statbar):
if matplotlib.rcParams['toolbar']=='classic':
toolbar = NavigationToolbarWx(self.canvas, True)
elif matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2WxAgg(self.canvas)
toolbar.set_status_bar(statbar)
else:
toolbar = None
return toolbar
class FigureCanvasWxAgg(FigureCanvasAgg, FigureCanvasWx):
"""
The FigureCanvas contains the figure and does event handling.
In the wxPython backend, it is derived from wxPanel, and (usually)
lives inside a frame instantiated by a FigureManagerWx. The parent
window probably implements a wxSizer to control the displayed
control size - but we give a hint as to our preferred minimum
size.
"""
def draw(self, drawDC=None):
"""
Render the figure using agg.
"""
DEBUG_MSG("draw()", 1, self)
FigureCanvasAgg.draw(self)
self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
self._isDrawn = True
self.gui_repaint(drawDC=drawDC)
def blit(self, bbox=None):
"""
Transfer the region of the agg buffer defined by bbox to the display.
If bbox is None, the entire buffer is transferred.
"""
if bbox is None:
self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
self.gui_repaint()
return
l, b, w, h = bbox.bounds
r = l + w
t = b + h
x = int(l)
y = int(self.bitmap.GetHeight() - t)
srcBmp = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destDC = wx.MemoryDC()
destDC.SelectObject(self.bitmap)
destDC.BeginDrawing()
destDC.Blit(x, y, int(w), int(h), srcDC, x, y)
destDC.EndDrawing()
destDC.SelectObject(wx.NullBitmap)
srcDC.SelectObject(wx.NullBitmap)
self.gui_repaint()
filetypes = FigureCanvasAgg.filetypes
def print_figure(self, filename, *args, **kwargs):
# Use pure Agg renderer to draw
FigureCanvasAgg.print_figure(self, filename, *args, **kwargs)
# Restore the current view; this is needed because the
        # artist contains methods that rely on particular attributes
# of the rendered figure for determining things like
# bounding boxes.
if self._isDrawn:
self.draw()
class NavigationToolbar2WxAgg(NavigationToolbar2Wx):
def get_canvas(self, frame, fig):
return FigureCanvasWxAgg(frame, -1, fig)
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# in order to expose the Figure constructor to the pylab
# interface we need to create the figure here
DEBUG_MSG("new_figure_manager()", 3, None)
backend_wx._create_wx_app()
FigureClass = kwargs.pop('FigureClass', Figure)
fig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, fig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
frame = FigureFrameWxAgg(num, figure)
figmgr = frame.get_figure_manager()
if matplotlib.is_interactive():
figmgr.frame.Show()
return figmgr
#
# agg/wxPython image conversion functions (wxPython >= 2.8)
#
def _convert_agg_to_wx_image(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Image. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgb -> image
image = wx.EmptyImage(int(agg.width), int(agg.height))
image.SetData(agg.tostring_rgb())
return image
else:
# agg => rgba buffer -> bitmap => clipped bitmap => image
return wx.ImageFromBitmap(_WX28_clipped_agg_as_bitmap(agg, bbox))
def _convert_agg_to_wx_bitmap(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Bitmap. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgba buffer -> bitmap
return wx.BitmapFromBufferRGBA(int(agg.width), int(agg.height),
agg.buffer_rgba())
else:
# agg => rgba buffer -> bitmap => clipped bitmap
return _WX28_clipped_agg_as_bitmap(agg, bbox)
def _WX28_clipped_agg_as_bitmap(agg, bbox):
"""
    Convert the region of the agg buffer bounded by bbox to a wx.Bitmap.
Note: agg must be a backend_agg.RendererAgg instance.
"""
l, b, width, height = bbox.bounds
r = l + width
t = b + height
srcBmp = wx.BitmapFromBufferRGBA(int(agg.width), int(agg.height),
agg.buffer_rgba())
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destBmp = wx.EmptyBitmap(int(width), int(height))
destDC = wx.MemoryDC()
destDC.SelectObject(destBmp)
destDC.BeginDrawing()
x = int(l)
y = int(int(agg.height) - t)
destDC.Blit(0, 0, int(width), int(height), srcDC, x, y)
destDC.EndDrawing()
srcDC.SelectObject(wx.NullBitmap)
destDC.SelectObject(wx.NullBitmap)
return destBmp | gpl-3.0 |
jakevdp/seaborn | setup.py | 6 | 3621 | #! /usr/bin/env python
#
# Copyright (C) 2012-2014 Michael Waskom <[email protected]>
import os
# temporarily redirect config directory to prevent matplotlib importing
# testing that for writeable directory which results in sandbox error in
# certain easy_install versions
os.environ["MPLCONFIGDIR"] = "."
DESCRIPTION = "Seaborn: statistical data visualization"
LONG_DESCRIPTION = """\
Seaborn is a library for making attractive and informative statistical graphics in Python. It is built on top of matplotlib and tightly integrated with the PyData stack, including support for numpy and pandas data structures and statistical routines from scipy and statsmodels.
Some of the features that seaborn offers are
- Several built-in themes that improve on the default matplotlib aesthetics
- Tools for choosing color palettes to make beautiful plots that reveal patterns in your data
- Functions for visualizing univariate and bivariate distributions or for comparing them between subsets of data
- Tools that fit and visualize linear regression models for different kinds of independent and dependent variables
- Functions that visualize matrices of data and use clustering algorithms to discover structure in those matrices
- A function to plot statistical timeseries data with flexible estimation and representation of uncertainty around the estimate
- High-level abstractions for structuring grids of plots that let you easily build complex visualizations
"""
DISTNAME = 'seaborn'
MAINTAINER = 'Michael Waskom'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://stanford.edu/~mwaskom/software/seaborn/'
LICENSE = 'BSD (3-clause)'
DOWNLOAD_URL = 'https://github.com/mwaskom/seaborn/'
VERSION = '0.6.dev'
try:
from setuptools import setup
_has_setuptools = True
except ImportError:
from distutils.core import setup
def check_dependencies():
install_requires = []
# Just make sure dependencies exist, I haven't rigorously
# tested what the minimal versions that will work are
# (help on that would be awesome)
try:
import numpy
except ImportError:
install_requires.append('numpy')
try:
import scipy
except ImportError:
install_requires.append('scipy')
try:
import matplotlib
except ImportError:
install_requires.append('matplotlib')
try:
import pandas
except ImportError:
install_requires.append('pandas')
return install_requires
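def _check_dependencies_loop():
    # Illustrative sketch only (not used by this setup script): the same
    # "append the package when its import fails" logic as check_dependencies
    # above, written as a single loop.
    install_requires = []
    for package in ('numpy', 'scipy', 'matplotlib', 'pandas'):
        try:
            __import__(package)
        except ImportError:
            install_requires.append(package)
    return install_requires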
if __name__ == "__main__":
install_requires = check_dependencies()
setup(name=DISTNAME,
author=MAINTAINER,
author_email=MAINTAINER_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
install_requires=install_requires,
packages=['seaborn', 'seaborn.external', 'seaborn.tests'],
classifiers=[
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'License :: OSI Approved :: BSD License',
'Topic :: Scientific/Engineering :: Visualization',
'Topic :: Multimedia :: Graphics',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS'],
)
| bsd-3-clause |
zhangyaonju/Global_GPP_VPM_NCEP_C3C4 | EVIgapfill/getVIref.py | 1 | 12660 | #-------------------------------------------------------------------------------
# Name: Preprocessing for the EVI reference
# Inputs: EVI for each 8-day from all tiles and quality layers
#
# Author: Yao Zhang
#
# Created: 3/29/2017
# Modified:
# Copyright: (c) eomf 2015
# Licence: <your licence>
#-------------------------------------------------------------------------------
import multiprocessing
import os
from os import listdir
from os.path import isfile, join
from osgeo import gdal
from osgeo.gdalconst import *
from scipy.signal import savgol_filter
from ctypes import *
import numpy as np
import numpy.ma as ma
#from netCDF4 import Dataset
import time
import pandas as pd
startTime = time.time()
root = '/data/ifs/modis/products_006/mod09a1/geotiff/'
dirref = '/data/ifs/VPM/driving_data/EVI_ref/'
'''
def nan_helper(y):
return np.isnan(y), lambda z: z.nonzero()[0]
'''
def VIsmooth(ndvi):
rdays = c_int(len(ndvi))
fun = cdll.LoadLibrary(os.getcwd() + '/bise.so')
outndvi = (c_double * len(ndvi))()
slidingperiod = c_int(np.sum(ndvi == 0)/(len(ndvi)/20))
#apply the bise algorithm
fun.bise(byref(rdays), (c_double * len(ndvi))(*ndvi), byref(slidingperiod), outndvi)
bisendvi = np.frombuffer(outndvi)
#print bisendvi
bisendvi[bisendvi == -1] = np.nan
peaks = []
threshold = 1.5
check = np.argwhere(np.isnan(bisendvi))
#print check
if len(check) < 3:
return ndvi
else:
for i in range(0, len(check)):
if i == 0:
if bisendvi[check[i]] > (threshold * np.mean(bisendvi[np.array([(check[len(check)-1], check[i+1])])])):
if bisendvi[check[i]] > 3000: peaks.append(check[i])
else:
if i == (len(check)-1):
if bisendvi[check[i]] > (threshold * np.mean(bisendvi[np.array([(check[i-1], check[1])])])):
if bisendvi[check[i]] > 3000: peaks.append(check[i])
else:
if bisendvi[check[i]] > (threshold * np.mean(bisendvi[check[np.array([i-1, i+1])]])):
if bisendvi[check[i]] > 3000: peaks.append(check[i])
bisendvi[peaks] = np.nan
return bisendvi
#
def buildVrtFile(root, doy, tile, product):
fileList = []
for year in range(2000, 2017):
tiledir = os.path.join(root, product, str(year), tile)
for path, subdirs, files in os.walk(tiledir):
for name in files:
if (str(1000+doy)[1:] == name[13:16]) and (".tif" == name[-4:]): fileList.append([os.path.join(path, name)])
fileList.sort()
print len(fileList), 'files were built into a vrt file'
if len(fileList) == 0: return 0
filename = os.path.join('/data/ifs/users/yzhang/TEMP/VRT', str(1000+doy)[1:]+tile+product+'_list.txt')
outFilelist = open(filename, 'w')
for file in fileList:
outFilelist.write(file[0]+'\r\n')
outFilelist.close()
return filename
def write_file(output_name, output_array, GeoT, xsize, ysize, proJ, driverName='GTiff'):
print "creating", output_name
dr = gdal.GetDriverByName(driverName)
dr.Register()
do = dr.Create(output_name, xsize, ysize, 1, gdal.GDT_UInt16, options=['COMPRESS=LZW'])
do.SetGeoTransform(GeoT)
do.SetProjection(proJ)
do.GetRasterBand(1).WriteArray(output_array)
do.GetRasterBand(1).SetNoDataValue(32767)
do = None
def export_array (Rasters, directory, prod, tile, index):
fileNum = Rasters.shape[0]
for i in range(fileNum):
fileName=os.path.join(directory, 'MOD09A1.'+str(1000+index[i])[1:]+'.'+tile+'.'+prod+'.tif')
write_file(fileName, Rasters[i, :, :], geoTran, cols, rows, geoProj, "GTiff")
def parallelize_dataframe(df):
df_split = np.array_split(df, 5, axis=1)
pool = multiprocessing.Pool(5)
df = np.concatenate(pool.map(dataframeapply, df_split), axis=1)
pool.close()
pool.join()
return df
def dataframeapply(df):
df = pd.DataFrame(np.concatenate([df[23:46, :], df, df[:23, :]]))
df_smoothed = df.apply(VIsmooth)
df_smoothed = df_smoothed.interpolate(axis=0)
#make a SG filter
df_select = df_smoothed.as_matrix()[23:69, :]
df_select[np.isnan(df_select)] = 0
bisendviSG = savgol_filter(df_select, window_length=5, polyorder=3)
#bisendvi = None
bisendviSG[bisendviSG < 0] = 0
return bisendviSG
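def _smoothing_chain_demo():
    # Illustrative sketch only (never called): the same circular padding ->
    # linear interpolation -> Savitzky-Golay chain that dataframeapply()
    # applies to each 46-step EVI series, run here on one synthetic noisy
    # series with a few gaps.
    doy_series = 5000 + 3000*np.sin(2*np.pi*np.arange(46)/46.0)
    doy_series += 200*np.random.randn(46)
    doy_series[::7] = np.nan                      # pretend these were cloudy
    padded = np.concatenate([doy_series[23:46], doy_series, doy_series[:23]])
    filled = pd.DataFrame(padded).interpolate(axis=0).values[23:69, 0]
    filled[np.isnan(filled)] = 0
    return savgol_filter(filled, window_length=5, polyorder=3)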
def import_all_year_data(tile):
temp = np.zeros([46, 2400*2400], np.dtype(float))
if int(tile[5:6])<2:
temp[:]=np.nan
for doy in range(1, 369, 8):
evifile = buildVrtFile(root, doy, tile, 'evi')
cloudfile = buildVrtFile(root, doy, tile, 'cloudmask')
aerosolfile = buildVrtFile(root, doy, tile, 'aerosolmask')
#if no file found for this DOY
if evifile == 0: continue
#doyList.append(doy)
#build vrt for EVI
vrtEVI = os.path.join(os.path.dirname(evifile), str(1000+doy)[1:]+tile+'EVI_vrt.vrt')
print "Building the vrt file: ", evifile
os.system('gdalbuildvrt -separate -input_file_list '+evifile+' '+vrtEVI)
inEVI = gdal.Open(vrtEVI)
EVI = inEVI.ReadAsArray()
#build vrt for cloudmask
vrtcloud = os.path.join(os.path.dirname(cloudfile), str(1000+doy)[1:]+tile+'cloud_vrt.vrt')
print "Building the vrt file: ", cloudfile
os.system('gdalbuildvrt -separate -input_file_list '+cloudfile+' '+vrtcloud)
incloud = gdal.Open(vrtcloud)
cloud = incloud.ReadAsArray()
#build vrt for aerosol
vrtaerosol = os.path.join(os.path.dirname(aerosolfile), str(1000+doy)[1:]+tile+'aerosol_vrt.vrt')
print "Building the vrt file: ", aerosolfile
os.system('gdalbuildvrt -separate -input_file_list '+aerosolfile+' '+vrtaerosol)
inaerosol = gdal.Open(vrtaerosol)
aerosol = inaerosol.ReadAsArray()
global rows, cols, geoProj, geoTran
rows = 2400
cols = 2400
geoTran = inEVI.GetGeoTransform()
geoProj = inEVI.GetProjection()
#mask for bad quality
EVIgood = ma.masked_where((cloud != 1)|(aerosol == 0)|(EVI < 0)|(EVI > 10000), EVI)
EVIgood = EVIgood.reshape(EVIgood.size/2400/2400, 2400*2400)
medianEVI = np.nanmedian(EVIgood, axis=0)
EVI = None
aerosol = None
cloud = None
EVIgood = None
#assign to the 46 layer of matrix
temp[(doy-1)/8, :] = medianEVI
meanEVI = None
return temp
def smooth(tile):
#first use this function to get mean and save it in an array
temp = import_all_year_data(tile)
####after get the mean value for all doy, I will run a bise gapfill first
print temp.size
##when using the single processing
#inputVI = pd.DataFrame(temp)
#VIsmoothed = inputVI.apply(VIsmooth, axis=0)
#VIsmoothed = VIsmoothed.as_matrix()
#VIsmoothed = parallelize_dataframe(temp)
##when using the multiprocessing
VIsmoothed = dataframeapply(temp)
VIsmoothed = VIsmoothed.reshape(VIsmoothed.size/2400/2400, 2400, 2400)
TILEdir = os.path.join(dirref, tile)
if not os.path.exists(TILEdir):
os.makedirs(TILEdir)
export_array (Rasters=np.int16(VIsmoothed), directory=TILEdir, \
prod='EVI.BISE.SG', tile=tile, index=range(1, 369, 8))
temp = None
inputVI = None
VIsmoothed = None
def process_list(tile=None, mp=True, count=1):
if mp:
#count = multiprocessing.cpu_count()-save_cpus
pool = multiprocessing.Pool(processes=count)
pool.map(smooth, tile)
#
'''
tile = ['h17v00','h12v01','h13v01','h14v01','h15v01','h16v01','h17v01','h18v01','h19v01','h20v01','h21v01',\
'h22v01','h23v01','h09v02','h10v02','h11v02','h12v02','h13v02','h14v02','h15v02','h16v02','h17v02',\
'h18v02','h19v02','h20v02','h21v02','h22v02','h23v02','h24v02','h25v02','h26v02','h06v03','h07v03',\
'h08v03','h09v03','h10v03','h11v03','h12v03','h13v03','h14v03','h15v03','h17v03','h18v03','h19v03',\
'h20v03','h21v03','h22v03','h23v03','h24v03','h25v03','h26v03','h27v03','h28v03','h29v03','h08v04',\
'h09v04','h10v04','h11v04','h12v04','h13v04','h14v04','h17v04','h18v04','h19v04','h20v04','h21v04',\
'h22v04','h23v04','h24v04','h25v04','h26v04','h27v04','h28v04','h07v05','h08v05','h09v05','h10v05',\
'h11v05','h12v05','h15v05','h16v05','h17v05','h18v05','h19v05','h20v05','h21v05','h22v05','h23v05',\
'h24v05','h25v05','h26v05','h27v05','h28v05','h29v05','h30v05','h02v06','h03v06','h07v06','h08v06',\
'h09v06','h10v06','h11v06','h16v06','h17v06','h18v06','h19v06','h20v06','h21v06','h22v06','h23v06',\
'h24v06','h25v06','h26v06','h27v06','h28v06','h29v06','h30v06','h31v06','h01v07','h03v07','h07v07',\
'h08v07','h09v07','h10v07','h11v07','h12v07','h15v07','h16v07','h17v07','h18v07','h19v07','h20v07',\
'h21v07','h22v07','h23v07','h24v07','h25v07','h26v07','h27v07','h28v07','h29v07','h30v07','h31v07',\
'h32v07','h33v07','h34v07','h00v08','h01v08','h02v08','h08v08','h09v08','h10v08','h11v08','h12v08',\
'h13v08','h16v08','h17v08','h18v08','h19v08','h20v08','h21v08','h22v08','h23v08','h25v08','h26v08',\
'h27v08','h28v08','h29v08','h30v08','h31v08','h32v08','h33v08','h34v08','h35v08','h00v09','h01v09',\
'h02v09','h03v09','h04v09','h08v09','h09v09','h10v09','h11v09','h12v09','h13v09','h14v09','h16v09',\
'h18v09','h19v09','h20v09','h21v09','h22v09','h23v09','h25v09','h27v09','h28v09','h29v09','h30v09',\
'h31v09','h32v09','h33v09','h34v09','h35v09',\
#southhemisphere
'h00v10','h01v10','h02v10','h03v10','h04v10','h05v10','h10v10','h11v10','h12v10','h13v10','h14v10',\
'h17v10','h19v10','h20v10','h21v10','h22v10','h23v10','h27v10','h28v10','h29v10','h30v10','h31v10',\
'h32v10','h33v10','h34v10','h35v10','h01v11','h02v11','h03v11','h04v11','h05v11','h06v11','h08v11',\
'h10v11','h11v11','h12v11','h13v11','h14v11','h15v11','h19v11','h20v11','h21v11','h22v11','h23v11',\
'h27v11','h28v11','h29v11','h30v11','h31v11','h32v11','h33v11','h11v12','h12v12','h13v12','h16v12',\
'h17v12','h19v12','h20v12','h24v12','h27v12','h28v12','h29v12','h30v12','h31v12','h32v12','h05v13',\
'h12v13','h13v13','h17v13','h20v13','h21v13','h22v13','h28v13','h29v13','h30v13','h31v13','h13v14',\
'h14v14','h15v14','h16v14','h18v14','h22v14','h27v14','h28v14']
'''
'''
tile = ["h17v00","h13v01","h10v02","h21v02","h22v02","h20v04","h21v04","h23v04",\
"h24v04","h27v04","h08v05","h10v05","h11v05","h17v05","h19v05","h20v05","h21v05",\
"h22v05","h23v05","h24v05","h25v05","h26v05","h27v05","h28v05","h29v05","h30v05",\
"h02v06","h03v06","h07v06","h08v06","h09v06","h11v06","h16v06","h17v06","h18v06",\
"h19v06","h20v06","h21v06","h22v06","h23v06","h24v06","h25v06","h26v06","h27v06",\
"h28v06","h29v06","h30v06","h31v06","h01v07","h03v07","h08v07","h12v07","h24v07"]
tile = ["h00v10","h01v10","h02v06","h02v10","h03v10","h04v10","h07v05","h08v06","h09v05",\
"h09v06","h10v05","h10v09","h10v10","h11v01","h11v05","h11v10","h12v09","h12v10",\
"h13v10","h14v00","h14v04","h14v10","h15v00","h16v00","h17v04","h17v05","h18v00",\
"h18v06","h19v00","h19v04","h19v05","h19v06","h20v00","h20v06","h21v00","h21v05",\
"h21v06","h21v10","h22v04","h22v05","h22v06","h23v04","h23v05","h23v06","h23v09",\
"h24v01","h24v05","h25v04","h25v05","h25v09","h26v04","h27v04","h27v05","h27v10",\
"h28v04","h28v09","h28v10","h29v09","h29v10","h30v05","h30v09","h30v10","h31v10",\
"h32v10","h35v09"]
'''
tile = ["h11v01","h12v01","h13v01","h14v00","h14v01","h15v00","h15v01","h16v00","h16v01",\
"h17v00","h17v01","h18v00","h18v01","h19v00","h19v01","h20v00","h20v01","h21v00",\
"h21v01","h22v01","h23v01","h24v01"]
#i = np.arange(0,5)
#segtiles = tile[0:60] #lotus
#segtiles = tile[60:120] #for peony
#segtiles = tile[120:180] #for cattle
#segtiles = tile[180:240] # crane
#segtiles = tile[240:287] #lily
#segtiles = tile[0:12] #for cattle
#segtiles = tile[12:24] #for lily
#segtiles = tile[24:36] #for crane
#segtiles = tile[36:48] #for lotus
#segtiles = tile[48:65] #for peony
#smooth(segtiles)
#process_list(segtiles, mp=True, count=6)
#segtiles = tile[0:5] #for cattle
#segtiles = tile[5:10] #for lily
#segtiles = tile[10:15] #for crane
#segtiles = tile[15:20] #for lotus
segtiles = tile[20:22] #for peony
process_list(segtiles, mp=True, count=5)
| apache-2.0 |
wangshiphys/HamiltonianPy | HamiltonianPy/tools/Octahedron.py | 1 | 2368 | """
Plot the octahedron in 3D
"""
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
color_map = plt.get_cmap("tab10")
colors = color_map(range(color_map.N))
marker_size_center = 25
marker_size_corner = 22
line_width = 8
# Coordinates of the points
point0 = [0, 0, 0]
point1 = [1, -1, 0]
point2 = [1, 1, 0]
point3 = [-1, 1, 0]
point4 = [-1, -1, 0]
point5 = [0, 0, np.sqrt(2)]
point6 = [0, 0, -np.sqrt(2)]
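# Quick sanity check (illustrative, not needed for the plot): with the square
# base at (+-1, +-1, 0) and the apexes at (0, 0, +-sqrt(2)) all 12 edges have
# length 2, so the solid drawn below is a regular octahedron; the remaining
# 3 vertex pairs are opposite corners at distance 2*sqrt(2).
_vertices = np.array([point1, point2, point3, point4, point5, point6])
_pair_dists = np.linalg.norm(
    _vertices[:, None, :] - _vertices[None, :, :], axis=-1
)
_sorted = np.sort(_pair_dists[np.triu_indices(6, k=1)])
assert np.allclose(_sorted[:12], 2.0)
assert np.allclose(_sorted[12:], 2.0*np.sqrt(2.0))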
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
lines = [
[np.array([point1, point2]), "solid", line_width, None],
[np.array([point2, point3]), "solid", line_width, None],
[np.array([point3, point4]), "dashed", line_width, None],
[np.array([point4, point1]), "solid", line_width, None],
[np.array([point0, point1]), "dashed", line_width/2, "gray"],
[np.array([point0, point2]), "dashed", line_width/2, "gray"],
[np.array([point0, point3]), "dashed", line_width/2, "gray"],
[np.array([point0, point4]), "dashed", line_width/2, "gray"],
[np.array([point0, point5]), "dashed", line_width/2, "gray"],
[np.array([point0, point6]), "dashed", line_width/2, "gray"],
[np.array([point5, point1]), "solid", line_width, None],
[np.array([point5, point2]), "solid", line_width, None],
[np.array([point5, point3]), "solid", line_width, None],
[np.array([point5, point4]), "solid", line_width, None],
[np.array([point6, point1]), "solid", line_width, None],
[np.array([point6, point2]), "solid", line_width, None],
[np.array([point6, point3]), "dashed", line_width, None],
[np.array([point6, point4]), "dashed", line_width, None],
]
for endpoints, ls, lw, color in lines:
ax.plot(
endpoints[:, 0], endpoints[:, 1], endpoints[:, 2],
lw=lw, ls=ls, color=color,
)
# Plot the 7 points
ax.plot(
point0[0:1], point0[1:2], point0[2:], ls="",
marker="o", color=colors[0], markersize=marker_size_center
)
points = np.array([point1, point2, point3, point4, point5, point6])
ax.plot(
points[:, 0], points[:, 1], points[:, 2], ls="",
marker="o", color=colors[1], markersize=marker_size_corner
)
azimuth = 0
# elevation = -36
elevation = 20
ax.view_init(elevation, azimuth)
ax.set_axis_off()
plt.show()
fig.savefig(
"Octahedron_azimuth={0}_elevation={1}.png".format(azimuth, elevation),
)
plt.close("all")
| gpl-3.0 |
DonBeo/statsmodels | statsmodels/formula/formulatools.py | 32 | 3846 | from statsmodels.compat.python import iterkeys
import statsmodels.tools.data as data_util
from patsy import dmatrices, NAAction
import numpy as np
# if users want to pass in a different formula framework, they can
# add their handler here. how to do it interactively?
# this is a mutable object, so editing it should show up in the below
formula_handler = {}
class NAAction(NAAction):
# monkey-patch so we can handle missing values in 'extra' arrays later
def _handle_NA_drop(self, values, is_NAs, origins):
total_mask = np.zeros(is_NAs[0].shape[0], dtype=bool)
for is_NA in is_NAs:
total_mask |= is_NA
good_mask = ~total_mask
self.missing_mask = total_mask
# "..." to handle 1- versus 2-dim indexing
return [v[good_mask, ...] for v in values]
def handle_formula_data(Y, X, formula, depth=0, missing='drop'):
"""
Returns endog, exog, and the model specification from arrays and formula
Parameters
----------
Y : array-like
Either endog (the LHS) of a model specification or all of the data.
Y must define __getitem__ for now.
X : array-like
Either exog or None. If all the data for the formula is provided in
Y then you must explicitly set X to None.
formula : str or patsy.model_desc
        You can pass a handler by importing formula_handler and adding a
        key-value pair where the key is the formula object class and the
        value is a function that returns endog, exog, and the formula object.
Returns
-------
endog : array-like
Should preserve the input type of Y,X
exog : array-like
Should preserve the input type of Y,X. Could be None.
"""
# half ass attempt to handle other formula objects
if isinstance(formula, tuple(iterkeys(formula_handler))):
return formula_handler[type(formula)]
na_action = NAAction(on_NA=missing)
if X is not None:
if data_util._is_using_pandas(Y, X):
result = dmatrices(formula, (Y, X), depth,
return_type='dataframe', NA_action=na_action)
else:
result = dmatrices(formula, (Y, X), depth,
return_type='dataframe', NA_action=na_action)
else:
if data_util._is_using_pandas(Y, None):
result = dmatrices(formula, Y, depth, return_type='dataframe',
NA_action=na_action)
else:
result = dmatrices(formula, Y, depth, return_type='dataframe',
NA_action=na_action)
    # if missing == 'raise' there's no missing_mask
missing_mask = getattr(na_action, 'missing_mask', None)
if not np.any(missing_mask):
missing_mask = None
if len(result) > 1: # have RHS design
design_info = result[1].design_info # detach it from DataFrame
else:
design_info = None
# NOTE: is there ever a case where we'd need LHS design_info?
return result, missing_mask, design_info
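def _handle_formula_data_example():
    """Illustrative sketch only (not part of statsmodels): a minimal call
    where the pandas DataFrame ``df`` holds both sides of the formula, so X
    is passed explicitly as None.
    """
    import pandas as pd
    df = pd.DataFrame({"y": [1.0, 2.0, 3.0, 4.0],
                       "x": [0.5, 1.5, 2.5, 3.5]})
    (endog, exog), missing_mask, design_info = handle_formula_data(
        df, None, "y ~ x", depth=0, missing="drop")
    # endog and exog come back as patsy design DataFrames; missing_mask is
    # None here because no rows were dropped.
    return endog, exog, design_info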
def _remove_intercept_patsy(terms):
"""
Remove intercept from Patsy terms.
"""
from patsy.desc import INTERCEPT
if INTERCEPT in terms:
terms.remove(INTERCEPT)
return terms
def _has_intercept(design_info):
from patsy.desc import INTERCEPT
return INTERCEPT in design_info.terms
def _intercept_idx(design_info):
"""
Returns boolean array index indicating which column holds the intercept
"""
from patsy.desc import INTERCEPT
from numpy import array
return array([INTERCEPT == i for i in design_info.terms])
def make_hypotheses_matrices(model_results, test_formula):
"""
"""
from patsy.constraint import linear_constraint
exog_names = model_results.model.exog_names
LC = linear_constraint(test_formula, exog_names)
return LC
| bsd-3-clause |
mraspaud/mpop | mpop/imageo/formats/tifffile.py | 2 | 179637 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# tifffile.py
# Copyright (c) 2008-2014, Christoph Gohlke
# Copyright (c) 2008-2014, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Read and write image data from and to TIFF files.
Image and metadata can be read from TIFF, BigTIFF, OME-TIFF, STK, LSM, NIH,
SGI, ImageJ, MicroManager, FluoView, SEQ and GEL files.
Only a subset of the TIFF specification is supported, mainly uncompressed
and losslessly compressed 2**(0 to 6) bit integer, 16, 32 and 64-bit float,
grayscale and RGB(A) images, which are commonly used in bio-scientific imaging.
Specifically, reading JPEG and CCITT compressed image data or EXIF, IPTC, GPS,
and XMP metadata is not implemented.
Only primary info records are read for STK, FluoView, MicroManager, and
NIH image formats.
TIFF, the Tagged Image File Format, is under the control of Adobe Systems.
BigTIFF allows for files greater than 4 GB. STK, LSM, FluoView, SGI, SEQ, GEL,
and OME-TIFF, are custom extensions defined by Molecular Devices (Universal
Imaging Corporation), Carl Zeiss MicroImaging, Olympus, Silicon Graphics
International, Media Cybernetics, Molecular Dynamics, and the Open Microscopy
Environment consortium respectively.
For command line usage run ``python tifffile.py --help``
:Author:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 2014.08.24
Requirements
------------
* `CPython 2.7 or 3.4 <http://www.python.org>`_
* `Numpy 1.8.2 <http://www.numpy.org>`_
* `Matplotlib 1.4 <http://www.matplotlib.org>`_ (optional for plotting)
* `Tifffile.c 2013.11.05 <http://www.lfd.uci.edu/~gohlke/>`_
(recommended for faster decoding of PackBits and LZW encoded strings)
Notes
-----
The API is not stable yet and might change between revisions.
Tested on little-endian platforms only.
Other Python packages and modules for reading bio-scientific TIFF files:
* `Imread <http://luispedro.org/software/imread>`_
* `PyLibTiff <http://code.google.com/p/pylibtiff>`_
* `SimpleITK <http://www.simpleitk.org>`_
* `PyLSM <https://launchpad.net/pylsm>`_
* `PyMca.TiffIO.py <http://pymca.sourceforge.net/>`_ (same as fabio.TiffIO)
* `BioImageXD.Readers <http://www.bioimagexd.net/>`_
* `Cellcognition.io <http://cellcognition.org/>`_
* `CellProfiler.bioformats
<https://github.com/CellProfiler/python-bioformats>`_
Acknowledgements
----------------
* Egor Zindy, University of Manchester, for cz_lsm_scan_info specifics.
* Wim Lewis for a bug fix and some read_cz_lsm functions.
* Hadrien Mary for help on reading MicroManager files.
References
----------
(1) TIFF 6.0 Specification and Supplements. Adobe Systems Incorporated.
http://partners.adobe.com/public/developer/tiff/
(2) TIFF File Format FAQ. http://www.awaresystems.be/imaging/tiff/faq.html
(3) MetaMorph Stack (STK) Image File Format.
http://support.meta.moleculardevices.com/docs/t10243.pdf
(4) Image File Format Description LSM 5/7 Release 6.0 (ZEN 2010).
Carl Zeiss MicroImaging GmbH. BioSciences. May 10, 2011
(5) File Format Description - LSM 5xx Release 2.0.
http://ibb.gsf.de/homepage/karsten.rodenacker/IDL/Lsmfile.doc
(6) The OME-TIFF format.
http://www.openmicroscopy.org/site/support/file-formats/ome-tiff
(7) UltraQuant(r) Version 6.0 for Windows Start-Up Guide.
http://www.ultralum.com/images%20ultralum/pdf/UQStart%20Up%20Guide.pdf
(8) Micro-Manager File Formats.
http://www.micro-manager.org/wiki/Micro-Manager_File_Formats
(9) Tags for TIFF and Related Specifications. Digital Preservation.
http://www.digitalpreservation.gov/formats/content/tiff_tags.shtml
Examples
--------
>>> data = numpy.random.rand(5, 301, 219)
>>> imsave('temp.tif', data)
>>> image = imread('temp.tif')
>>> numpy.testing.assert_array_equal(image, data)
>>> with TiffFile('temp.tif') as tif:
... images = tif.asarray()
... for page in tif:
... for tag in page.tags.values():
... t = tag.name, tag.value
... image = page.asarray()
"""
from __future__ import division, print_function
import sys
import os
import re
import glob
import math
import zlib
import time
import json
import struct
import warnings
import tempfile
import datetime
import collections
from fractions import Fraction
from xml.etree import cElementTree as etree
import numpy
try:
import _tifffile
except ImportError:
warnings.warn(
"failed to import the optional _tifffile C extension module.\n"
"Loading of some compressed images will be slow.\n"
"Tifffile.c can be obtained at http://www.lfd.uci.edu/~gohlke/")
__version__ = '2014.08.24'
__docformat__ = 'restructuredtext en'
__all__ = ('imsave', 'imread', 'imshow', 'TiffFile', 'TiffWriter',
'TiffSequence')
def imsave(filename, data, **kwargs):
"""Write image data to TIFF file.
Refer to the TiffWriter class and member functions for documentation.
Parameters
----------
filename : str
Name of file to write.
data : array_like
Input image. The last dimensions are assumed to be image depth,
height, width, and samples.
kwargs : dict
Parameters 'byteorder', 'bigtiff', and 'software' are passed to
the TiffWriter class.
Parameters 'photometric', 'planarconfig', 'resolution',
'description', 'compress', 'volume', and 'extratags' are passed to
the TiffWriter.save function.
Examples
--------
>>> data = numpy.random.rand(2, 5, 3, 301, 219)
>>> description = u'{"shape": %s}' % str(list(data.shape))
>>> imsave('temp.tif', data, compress=6,
... extratags=[(270, 's', 0, description, True)])
Save tiles with compression enabled
>>> data = numpy.random.rand(400, 300)
>>> imsave('temp.tif', data, compress=6, tile_width=150, tile_length=100)
>>> with TiffFile('temp.tif') as tif:
... image = tif.asarray()
... page = tif[0]
>>> numpy.testing.assert_array_equal(image, data)
>>> page.tags['tile_width'].value
150
>>> page.tags['tile_length'].value
100
Save tiles with compression disabled
>>> data = numpy.random.rand(400, 300)
>>> imsave('temp.tif', data, compress=0, tile_width=150, tile_length=100)
>>> with TiffFile('temp.tif') as tif:
... image = tif.asarray()
... page = tif[0]
>>> numpy.testing.assert_array_equal(image, data)
>>> page.tags['tile_width'].value
150
>>> page.tags['tile_length'].value
100
Save tiles with compression enabled, 3 samples per pixel
>>> data = numpy.random.rand(3, 400, 300)
>>> imsave('temp.tif', data, compress=6, tile_width=150, tile_length=100)
>>> with TiffFile('temp.tif') as tif:
... image = tif.asarray()
... page = tif[0]
>>> numpy.testing.assert_array_equal(image, data)
>>> page.tags['tile_width'].value
150
>>> page.tags['tile_length'].value
100
Save colormap
>>> data = (numpy.random.rand(400, 300)*250).astype(numpy.uint8)
>>> cmap1ch = [x*256 for x in range(256)]
>>> cmap = cmap1ch + cmap1ch + cmap1ch
>>> data_colored = numpy.take(cmap1ch, data)
>>> data_colored = numpy.dstack((data_colored, data_colored, data_colored))
>>> data_colored = numpy.swapaxes(numpy.swapaxes(data_colored,0,2),1,2)
>>> imsave('temp.tif', data, photometric='palette', colormap = cmap)
>>> with TiffFile('temp.tif') as tif:
... image = tif.asarray()
... page = tif[0]
>>> numpy.testing.assert_array_equal(image, data_colored)
>>> numpy.testing.assert_array_equal(page.tags['color_map'].value, cmap)
"""
tifargs = {}
for key in ('byteorder', 'bigtiff', 'software', 'writeshape'):
if key in kwargs:
tifargs[key] = kwargs[key]
del kwargs[key]
if 'writeshape' not in kwargs:
kwargs['writeshape'] = True
if 'bigtiff' not in tifargs and data.size*data.dtype.itemsize > 2000*2**20:
tifargs['bigtiff'] = True
with TiffWriter(filename, **tifargs) as tif:
tif.save(data, **kwargs)
class TiffWriter(object):
"""Write image data to TIFF file.
TiffWriter instances must be closed using the close method, which is
automatically called when using the 'with' statement.
Examples
--------
>>> data = numpy.random.rand(2, 5, 3, 301, 219)
>>> with TiffWriter('temp.tif', bigtiff=True) as tif:
... for i in range(data.shape[0]):
... tif.save(data[i], compress=6)
"""
TYPES = {'B': 1, 's': 2, 'H': 3, 'I': 4, '2I': 5, 'b': 6,
'h': 8, 'i': 9, 'f': 11, 'd': 12, 'Q': 16, 'q': 17}
TAGS = {
'new_subfile_type': 254, 'subfile_type': 255,
'image_width': 256, 'image_length': 257, 'bits_per_sample': 258,
'compression': 259, 'photometric': 262, 'fill_order': 266,
'document_name': 269, 'image_description': 270, 'strip_offsets': 273,
'orientation': 274, 'samples_per_pixel': 277, 'rows_per_strip': 278,
'strip_byte_counts': 279, 'x_resolution': 282, 'y_resolution': 283,
'planar_configuration': 284, 'page_name': 285, 'resolution_unit': 296,
'software': 305, 'datetime': 306, 'predictor': 317, 'color_map': 320,
'tile_width': 322, 'tile_length': 323, 'tile_offsets': 324,
'tile_byte_counts': 325, 'extra_samples': 338, 'sample_format': 339,
'image_depth': 32997, 'tile_depth': 32998}
def __init__(self, filename, bigtiff=False, byteorder=None,
software='tifffile.py'):
"""Create a new TIFF file for writing.
Use bigtiff=True when creating files greater than 2 GB.
Parameters
----------
filename : str
Name of file to write.
bigtiff : bool
If True, the BigTIFF format is used.
byteorder : {'<', '>'}
The endianness of the data in the file.
By default this is the system's native byte order.
software : str
Name of the software used to create the image.
Saved with the first page only.
"""
if byteorder not in (None, '<', '>'):
raise ValueError("invalid byteorder %s" % byteorder)
if byteorder is None:
byteorder = '<' if sys.byteorder == 'little' else '>'
self._byteorder = byteorder
self._software = software
self._fh = open(filename, 'wb')
self._fh.write({'<': b'II', '>': b'MM'}[byteorder])
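        # TIFF header: a 2-byte byte order mark ('II' or 'MM'), then the
        # version number (42 for classic TIFF, 43 for BigTIFF); BigTIFF
        # additionally stores the offset size (8) and a reserved zero word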
if bigtiff:
self._bigtiff = True
self._offset_size = 8
self._tag_size = 20
self._numtag_format = 'Q'
self._offset_format = 'Q'
self._val_format = '8s'
self._fh.write(struct.pack(byteorder+'HHH', 43, 8, 0))
else:
self._bigtiff = False
self._offset_size = 4
self._tag_size = 12
self._numtag_format = 'H'
self._offset_format = 'I'
self._val_format = '4s'
self._fh.write(struct.pack(byteorder+'H', 42))
# first IFD
self._ifd_offset = self._fh.tell()
self._fh.write(struct.pack(byteorder+self._offset_format, 0))
def save(self, data, photometric=None, planarconfig=None, resolution=None,
description=None, volume=False, writeshape=False, compress=0,
colormap=None, extrasamples_type=1, tile_width=None,
tile_length=None, extratags=()):
"""Write image data to TIFF file.
        Image data are written in one strip per plane by default, or in
        tiles if the tile_width and tile_length arguments are given or
        volume is True.
Dimensions larger than 2 to 4 (depending on photometric mode, planar
configuration, and SGI mode) are flattened and saved as separate pages.
The 'sample_format' and 'bits_per_sample' TIFF tags are derived from
the data type.
Parameters
----------
data : array_like
Input image. The last dimensions are assumed to be image depth,
height, width, and samples.
photometric : {'minisblack', 'miniswhite', 'rgb', 'palette'}
The color space of the image data.
By default this setting is inferred from the data shape.
planarconfig : {'contig', 'planar'}
Specifies if samples are stored contiguous or in separate planes.
By default this setting is inferred from the data shape.
'contig': last dimension contains samples.
'planar': third last dimension contains samples.
resolution : (float, float) or ((int, int), (int, int))
X and Y resolution in dots per inch as float or rational numbers.
description : str
The subject of the image. Saved with the first page only.
compress : int
Values from 0 to 9 controlling the level of zlib compression.
If 0, data are written uncompressed (default).
volume : bool
If True, volume data are stored in one tile (if applicable) using
the SGI image_depth and tile_depth tags.
Image width and depth must be multiple of 16.
            Few applications can read this format, e.g. MeVisLab.
writeshape : bool
If True, write the data shape to the image_description tag
if necessary and no other description is given.
        colormap : sequence of uint16
            Color look-up table as three concatenated lists (one per RGB
            channel), mapping each possible data value to a color. For
            example, uint8 image data have 256 possible values (0-255),
            so the colormap must contain 3*256 values in the range
            0 to 65535 (2**16 - 1).
        tile_width : int
            If not None, data are stored in tiles of size
            (tile_length, tile_width). Must be given together with
            tile_length (default: None).
        tile_length : int
            If not None, data are stored in tiles of size
            (tile_length, tile_width). Must be given together with
            tile_width (default: None).
extratags: sequence of tuples
Additional tags as [(code, dtype, count, value, writeonce)].
code : int
The TIFF tag Id.
dtype : str
Data type of items in 'value' in Python struct format.
One of B, s, H, I, 2I, b, h, i, f, d, Q, or q.
count : int
Number of data values. Not used for string values.
value : sequence
'Count' values compatible with 'dtype'.
writeonce : bool
If True, the tag is written to the first page only.
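        Examples
        --------
        Minimal sketch, assuming 'temp.tif' is writable in the current
        directory:
        >>> data = numpy.random.rand(3, 301, 219).astype('float32')
        >>> with TiffWriter('temp.tif') as tif:
        ...     tif.save(data, photometric='rgb', planarconfig='planar')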
"""
if photometric not in (None, 'minisblack', 'miniswhite', 'rgb', 'palette'):
raise ValueError("invalid photometric %s" % photometric)
if planarconfig not in (None, 'contig', 'planar'):
raise ValueError("invalid planarconfig %s" % planarconfig)
if not 0 <= compress <= 9:
raise ValueError("invalid compression level %s" % compress)
fh = self._fh
byteorder = self._byteorder
numtag_format = self._numtag_format
val_format = self._val_format
offset_format = self._offset_format
offset_size = self._offset_size
tag_size = self._tag_size
data = numpy.asarray(data, dtype=byteorder+data.dtype.char, order='C')
data_shape = shape = data.shape
data = numpy.atleast_2d(data)
# enable tile writing if tile width and length specified
if tile_length is not None and tile_width is not None:
write_tiles = 1
else:
write_tiles = 0
# normalize shape of data
samplesperpixel = 1
extrasamples = 0
if volume and data.ndim < 3:
volume = False
if photometric is None:
if planarconfig:
photometric = 'rgb'
elif data.ndim > 2 and shape[-1] in (3, 4):
photometric = 'rgb'
elif volume and data.ndim > 3 and shape[-4] in (3, 4):
photometric = 'rgb'
elif data.ndim > 2 and shape[-3] in (3, 4):
photometric = 'rgb'
else:
photometric = 'minisblack'
if planarconfig and len(shape) <= (3 if volume else 2) and (
photometric != 'palette'):
planarconfig = None
photometric = 'minisblack'
if photometric == 'rgb':
if len(shape) < 3:
raise ValueError("not a RGB(A) image")
if len(shape) < 4:
volume = False
if planarconfig is None:
if shape[-1] in (3, 4):
planarconfig = 'contig'
elif shape[-4 if volume else -3] in (3, 4):
planarconfig = 'planar'
elif shape[-1] > shape[-4 if volume else -3]:
planarconfig = 'planar'
else:
planarconfig = 'contig'
if planarconfig == 'contig':
data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
samplesperpixel = data.shape[-1]
else:
data = data.reshape(
(-1,) + shape[(-4 if volume else -3):] + (1,))
samplesperpixel = data.shape[1]
if samplesperpixel > 3:
extrasamples = samplesperpixel - 3
elif photometric == 'palette':
if len(shape) > 2:
raise ValueError("not a 1-channel image")
samplesperpixel = 1
planarconfig = None
# remove trailing 1s
while len(shape) > 2 and shape[-1] == 1:
shape = shape[:-1]
if len(shape) < 3:
volume = False
data = data.reshape(
(-1, 1) + shape[(-3 if volume else -2):] + (1,))
elif planarconfig and len(shape) > (3 if volume else 2):
if planarconfig == 'contig':
data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
samplesperpixel = data.shape[-1]
else:
data = data.reshape(
(-1,) + shape[(-4 if volume else -3):] + (1,))
samplesperpixel = data.shape[1]
extrasamples = samplesperpixel - 1
else:
planarconfig = None
# remove trailing 1s
while len(shape) > 2 and shape[-1] == 1:
shape = shape[:-1]
if len(shape) < 3:
volume = False
if False and (
len(shape) > (3 if volume else 2) and shape[-1] < 5 and
all(shape[-1] < i
for i in shape[(-4 if volume else -3):-1])):
# DISABLED: non-standard TIFF, e.g. (220, 320, 2)
planarconfig = 'contig'
samplesperpixel = shape[-1]
data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
else:
data = data.reshape(
(-1, 1) + shape[(-3 if volume else -2):] + (1,))
if samplesperpixel == 2:
warnings.warn("writing non-standard TIFF (samplesperpixel 2)")
if volume and (data.shape[-2] % 16 or data.shape[-3] % 16):
warnings.warn("volume width or length are not multiple of 16")
volume = False
data = numpy.swapaxes(data, 1, 2)
data = data.reshape(
(data.shape[0] * data.shape[1],) + data.shape[2:])
# data.shape is now normalized 5D or 6D, depending on volume
# (pages, planar_samples, (depth,) height, width, contig_samples)
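        # e.g. a contiguous RGB series of 10 frames of 301x219 pixels is
        # normalized to (10, 1, 301, 219, 3)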
assert len(data.shape) in (5, 6)
shape = data.shape
bytestr = bytes if sys.version[0] == '2' else (
lambda x: bytes(x, 'utf-8') if isinstance(x, str) else x)
tags = [] # list of (code, ifdentry, ifdvalue, writeonce)
if volume or write_tiles:
            # use tiles to save volume data or if tiling was explicitly requested
tag_byte_counts = TiffWriter.TAGS['tile_byte_counts']
tag_offsets = TiffWriter.TAGS['tile_offsets']
else:
# else use strips
tag_byte_counts = TiffWriter.TAGS['strip_byte_counts']
tag_offsets = TiffWriter.TAGS['strip_offsets']
def pack(fmt, *val):
return struct.pack(byteorder+fmt, *val)
def addtag(code, dtype, count, value, writeonce=False):
# Compute ifdentry & ifdvalue bytes from code, dtype, count, value.
# Append (code, ifdentry, ifdvalue, writeonce) to tags list.
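            # e.g. addtag('image_width', 'I', 1, 219) stores the packed tag
            # code, data type, and count, with the value packed inline
            # because it fits into the offset-sized value field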
code = int(TiffWriter.TAGS.get(code, code))
try:
tifftype = TiffWriter.TYPES[dtype]
except KeyError:
raise ValueError("unknown dtype %s" % dtype)
rawcount = count
if dtype == 's':
value = bytestr(value) + b'\0'
count = rawcount = len(value)
value = (value, )
if len(dtype) > 1:
count *= int(dtype[:-1])
dtype = dtype[-1]
ifdentry = [pack('HH', code, tifftype),
pack(offset_format, rawcount)]
ifdvalue = None
if count == 1:
if isinstance(value, (tuple, list)):
value = value[0]
ifdentry.append(pack(val_format, pack(dtype, value)))
elif struct.calcsize(dtype) * count <= offset_size:
ifdentry.append(pack(val_format,
pack(str(count)+dtype, *value)))
else:
ifdentry.append(pack(offset_format, 0))
ifdvalue = pack(str(count)+dtype, *value)
tags.append((code, b''.join(ifdentry), ifdvalue, writeonce))
def rational(arg, max_denominator=1000000):
            # return numerator and denominator from float or two integers
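            # e.g. rational(72.0) -> (72, 1); rational((300, 2)) -> (150, 1)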
try:
f = Fraction.from_float(arg)
except TypeError:
f = Fraction(arg[0], arg[1])
f = f.limit_denominator(max_denominator)
return f.numerator, f.denominator
if self._software:
addtag('software', 's', 0, self._software, writeonce=True)
self._software = None # only save to first page
if description:
addtag('image_description', 's', 0, description, writeonce=True)
elif writeshape and shape[0] > 1 and shape != data_shape:
addtag('image_description', 's', 0,
"shape=(%s)" % (",".join('%i' % i for i in data_shape)),
writeonce=True)
addtag('datetime', 's', 0,
datetime.datetime.now().strftime("%Y:%m:%d %H:%M:%S"),
writeonce=True)
addtag('compression', 'H', 1, 32946 if compress else 1)
addtag('orientation', 'H', 1, 1)
addtag('image_width', 'I', 1, shape[-2])
addtag('image_length', 'I', 1, shape[-3])
if volume:
addtag('image_depth', 'I', 1, shape[-4])
addtag('tile_depth', 'I', 1, shape[-4])
addtag('tile_width', 'I', 1, shape[-2])
addtag('tile_length', 'I', 1, shape[-3])
elif write_tiles:
addtag('tile_width', 'I', 1, tile_width)
addtag('tile_length', 'I', 1, tile_length)
addtag('new_subfile_type', 'I', 1, 0 if shape[0] == 1 else 2)
# addtag('sample_format', 'H', 1,
# {'u': 1, 'i': 2, 'f': 3, 'c': 6}[data.dtype.kind])
addtag('sample_format', 'H', samplesperpixel,
({'u': 1, 'i': 2, 'f': 3, 'c': 6}[data.dtype.kind],) * samplesperpixel)
addtag('photometric', 'H', 1,
{'miniswhite': 0,
'minisblack': 1,
'rgb': 2,
'palette': 3}[photometric])
if photometric == 'palette':
            if colormap is None:
raise ValueError(
"photometric 'palette' specified but colormap missing")
else:
addtag('color_map', 'H',
3 * (2 ** (data.dtype.itemsize * 8 * samplesperpixel)),
colormap)
addtag('samples_per_pixel', 'H', 1, samplesperpixel)
if planarconfig and samplesperpixel > 1:
addtag('planar_configuration', 'H', 1, 1
if planarconfig == 'contig' else 2)
addtag('bits_per_sample', 'H', samplesperpixel,
(data.dtype.itemsize * 8, ) * samplesperpixel)
else:
addtag('bits_per_sample', 'H', 1, data.dtype.itemsize * 8)
if extrasamples:
if photometric == 'rgb' and extrasamples == 1:
                # single extra sample is treated as an alpha channel
                # (extrasamples_type: 1=associated, 2=unassociated alpha)
                addtag('extra_samples', 'H', 1, extrasamples_type)
else:
addtag('extra_samples', 'H', extrasamples, (0,) * extrasamples)
if resolution:
addtag('x_resolution', '2I', 1, rational(resolution[0]))
addtag('y_resolution', '2I', 1, rational(resolution[1]))
addtag('resolution_unit', 'H', 1, 2)
if not write_tiles:
addtag('rows_per_strip', 'I', 1,
shape[-3] * (shape[-4] if volume else 1))
if write_tiles:
# use multiple tiles per plane
tiles_x = (shape[3] + tile_width - 1) // tile_width
tiles_y = (shape[2] + tile_length - 1) // tile_length
strip_byte_counts = \
(tile_width * tile_length * shape[-1] * data.dtype.itemsize,) \
* shape[1] * tiles_x * tiles_y
else:
# use one strip or tile per plane
tiles_x = tiles_y = 1
strip_byte_counts = \
(data[0, 0].size * data.dtype.itemsize,) * shape[1]
addtag(tag_byte_counts,
offset_format, shape[1] * tiles_x * tiles_y, strip_byte_counts)
addtag(tag_offsets,
offset_format, shape[1] * tiles_x * tiles_y,
(0, ) * shape[1] * tiles_x * tiles_y)
# add extra tags from users
for t in extratags:
addtag(*t)
# the entries in an IFD must be sorted in ascending order by tag code
tags = sorted(tags, key=lambda x: x[0])
if not self._bigtiff and (fh.tell() + data.size*data.dtype.itemsize
> 2**31-1):
raise ValueError("data too large for non-bigtiff file")
for pageindex in range(shape[0]):
# update pointer at ifd_offset
pos = fh.tell()
fh.seek(self._ifd_offset)
fh.write(pack(offset_format, pos))
fh.seek(pos)
# write ifdentries
fh.write(pack(numtag_format, len(tags)))
tag_offset = fh.tell()
fh.write(b''.join(t[1] for t in tags))
self._ifd_offset = fh.tell()
fh.write(pack(offset_format, 0)) # offset to next IFD
# write tag values and patch offsets in ifdentries, if necessary
for tagindex, tag in enumerate(tags):
if tag[2]:
pos = fh.tell()
fh.seek(tag_offset + tagindex*tag_size + offset_size + 4)
fh.write(pack(offset_format, pos))
fh.seek(pos)
if tag[0] == tag_offsets:
strip_offsets_offset = pos
elif tag[0] == tag_byte_counts:
strip_byte_counts_offset = pos
fh.write(tag[2])
# write image data
data_offset = fh.tell()
if write_tiles:
# multiple tiles per page
if compress:
                    # reset and record the compressed sizes instead
strip_byte_counts = []
for plane in data[pageindex]:
                    for ty in range(0, tiles_y):
                        for tx in range(0, tiles_x):
# allocate fixed size tile filled with zeros
tile = numpy.zeros((tile_width * tile_length,
shape[-1]), data.dtype)
                            # clip tiles at the right and bottom image edges
                            # number of tile rows filled with image data
                            itl = min(tile_length,
                                      shape[2] - ty*tile_length)
                            # number of tile columns filled with image data
                            itw = min(tile_width,
                                      shape[3] - tx*tile_width)
ioffs = tx*tile_width
                            for tl in range(0, itl):
# copy data to tile line
ir = ty*tile_length+tl
tile[tl*tile_width:tl*tile_width+itw] \
= plane[ir, ioffs:ioffs+itw]
if compress:
tile = zlib.compress(tile, compress)
strip_byte_counts.append(len(tile))
fh.write(tile)
else:
tile.tofile(fh)
fh.flush()
else:
                # one strip or tile per plane
if compress:
strip_byte_counts = []
for plane in data[pageindex]:
plane = zlib.compress(plane, compress)
strip_byte_counts.append(len(plane))
fh.write(plane)
else:
                    # if this fails, try updating Python/numpy
data[pageindex].tofile(fh)
fh.flush()
# update strip and tile offsets and byte_counts if necessary
pos = fh.tell()
for tagindex, tag in enumerate(tags):
if tag[0] == tag_offsets: # strip or tile offsets
if tag[2]:
fh.seek(strip_offsets_offset)
strip_offset = data_offset
for size in strip_byte_counts:
fh.write(pack(offset_format, strip_offset))
strip_offset += size
else:
fh.seek(tag_offset + tagindex*tag_size +
offset_size + 4)
fh.write(pack(offset_format, data_offset))
elif tag[0] == tag_byte_counts: # strip or tile byte_counts
if compress:
if tag[2]:
fh.seek(strip_byte_counts_offset)
for size in strip_byte_counts:
fh.write(pack(offset_format, size))
else:
fh.seek(tag_offset + tagindex*tag_size +
offset_size + 4)
fh.write(pack(offset_format, strip_byte_counts[0]))
break
fh.seek(pos)
fh.flush()
# remove tags that should be written only once
if pageindex == 0:
tags = [t for t in tags if not t[-1]]
def close(self):
self._fh.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def imread(files, **kwargs):
"""Return image data from TIFF file(s) as numpy array.
The first image series is returned if no arguments are provided.
Parameters
----------
files : str or list
File name, glob pattern, or list of file names.
key : int, slice, or sequence of page indices
Defines which pages to return as array.
series : int
Defines which series of pages in file to return as array.
multifile : bool
If True (default), OME-TIFF data may include pages from multiple files.
pattern : str
Regular expression pattern that matches axes names and indices in
file names.
kwargs : dict
Additional parameters passed to the TiffFile or TiffSequence asarray
function.
Examples
--------
>>> im = imread('test.tif', key=0)
>>> im.shape
(256, 256, 4)
>>> ims = imread(['test.tif', 'test.tif'])
>>> ims.shape
(2, 256, 256, 4)
"""
kwargs_file = {}
if 'multifile' in kwargs:
kwargs_file['multifile'] = kwargs['multifile']
del kwargs['multifile']
else:
kwargs_file['multifile'] = True
kwargs_seq = {}
if 'pattern' in kwargs:
kwargs_seq['pattern'] = kwargs['pattern']
del kwargs['pattern']
if isinstance(files, basestring) and any(i in files for i in '?*'):
files = glob.glob(files)
if not files:
raise ValueError('no files found')
if len(files) == 1:
files = files[0]
if isinstance(files, basestring):
with TiffFile(files, **kwargs_file) as tif:
return tif.asarray(**kwargs)
else:
with TiffSequence(files, **kwargs_seq) as imseq:
return imseq.asarray(**kwargs)
class lazyattr(object):
"""Lazy object attribute whose value is computed on first access."""
__slots__ = ('func', )
def __init__(self, func):
self.func = func
def __get__(self, instance, owner):
if instance is None:
return self
value = self.func(instance)
if value is NotImplemented:
return getattr(super(owner, instance), self.func.__name__)
setattr(instance, self.func.__name__, value)
return value
class TiffFile(object):
"""Read image and metadata from TIFF, STK, LSM, and FluoView files.
TiffFile instances must be closed using the close method, which is
automatically called when using the 'with' statement.
Attributes
----------
pages : list
All TIFF pages in file.
series : list of Records(shape, dtype, axes, TiffPages)
TIFF pages with compatible shapes and types.
micromanager_metadata: dict
Extra MicroManager non-TIFF metadata in the file, if exists.
All attributes are read-only.
Examples
--------
>>> with TiffFile('test.tif') as tif:
... data = tif.asarray()
... data.shape
(256, 256, 4)
"""
def __init__(self, arg, name=None, offset=None, size=None,
multifile=True, multifile_close=True):
"""Initialize instance from file.
Parameters
----------
arg : str or open file
Name of file or open file object.
The file objects are closed in TiffFile.close().
name : str
Optional name of file in case 'arg' is a file handle.
offset : int
Optional start position of embedded file. By default this is
the current file position.
size : int
Optional size of embedded file. By default this is the number
of bytes from the 'offset' to the end of the file.
multifile : bool
If True (default), series may include pages from multiple files.
Currently applies to OME-TIFF only.
multifile_close : bool
If True (default), keep the handles of other files in multifile
series closed. This is inefficient when few files refer to
many pages. If False, the C runtime may run out of resources.
"""
self._fh = FileHandle(arg, name=name, offset=offset, size=size)
self.offset_size = None
self.pages = []
self._multifile = bool(multifile)
self._multifile_close = bool(multifile_close)
self._files = {self._fh.name: self} # cache of TiffFiles
try:
self._fromfile()
except Exception:
self._fh.close()
raise
@property
def filehandle(self):
"""Return file handle."""
return self._fh
@property
def filename(self):
"""Return name of file handle."""
return self._fh.name
def close(self):
"""Close open file handle(s)."""
for tif in self._files.values():
tif._fh.close()
self._files = {}
def _fromfile(self):
"""Read TIFF header and all page records from file."""
self._fh.seek(0)
try:
self.byteorder = {b'II': '<', b'MM': '>'}[self._fh.read(2)]
except KeyError:
raise ValueError("not a valid TIFF file")
version = struct.unpack(self.byteorder+'H', self._fh.read(2))[0]
if version == 43: # BigTiff
self.offset_size, zero = struct.unpack(self.byteorder+'HH',
self._fh.read(4))
if zero or self.offset_size != 8:
raise ValueError("not a valid BigTIFF file")
elif version == 42:
self.offset_size = 4
else:
raise ValueError("not a TIFF file")
self.pages = []
while True:
try:
page = TiffPage(self)
self.pages.append(page)
except StopIteration:
break
if not self.pages:
raise ValueError("empty TIFF file")
if self.is_micromanager:
# MicroManager files contain metadata not stored in TIFF tags.
self.micromanager_metadata = read_micromanager_metadata(self._fh)
if self.is_lsm:
self._fix_lsm_strip_offsets()
self._fix_lsm_strip_byte_counts()
def _fix_lsm_strip_offsets(self):
"""Unwrap strip offsets for LSM files greater than 4 GB."""
for series in self.series:
wrap = 0
previous_offset = 0
for page in series.pages:
strip_offsets = []
for current_offset in page.strip_offsets:
if current_offset < previous_offset:
wrap += 2**32
strip_offsets.append(current_offset + wrap)
previous_offset = current_offset
page.strip_offsets = tuple(strip_offsets)
def _fix_lsm_strip_byte_counts(self):
"""Set strip_byte_counts to size of compressed data.
The strip_byte_counts tag in LSM files contains the number of bytes
for the uncompressed data.
"""
if not self.pages:
return
strips = {}
for page in self.pages:
assert len(page.strip_offsets) == len(page.strip_byte_counts)
for offset, bytecount in zip(page.strip_offsets,
page.strip_byte_counts):
strips[offset] = bytecount
offsets = sorted(strips.keys())
offsets.append(min(offsets[-1] + strips[offsets[-1]], self._fh.size))
for i, offset in enumerate(offsets[:-1]):
strips[offset] = min(strips[offset], offsets[i+1] - offset)
for page in self.pages:
if page.compression:
page.strip_byte_counts = tuple(
strips[offset] for offset in page.strip_offsets)
@lazyattr
def series(self):
"""Return series of TiffPage with compatible shape and properties."""
if not self.pages:
return []
series = []
page0 = self.pages[0]
if self.is_ome:
series = self._omeseries()
elif self.is_fluoview:
dims = {b'X': 'X', b'Y': 'Y', b'Z': 'Z', b'T': 'T',
b'WAVELENGTH': 'C', b'TIME': 'T', b'XY': 'R',
b'EVENT': 'V', b'EXPOSURE': 'L'}
mmhd = list(reversed(page0.mm_header.dimensions))
series = [Record(
axes=''.join(dims.get(i[0].strip().upper(), 'Q')
for i in mmhd if i[1] > 1),
shape=tuple(int(i[1]) for i in mmhd if i[1] > 1),
pages=self.pages, dtype=numpy.dtype(page0.dtype))]
elif self.is_lsm:
lsmi = page0.cz_lsm_info
axes = CZ_SCAN_TYPES[lsmi.scan_type]
if page0.is_rgb:
axes = axes.replace('C', '').replace('XY', 'XYC')
axes = axes[::-1]
shape = tuple(getattr(lsmi, CZ_DIMENSIONS[i]) for i in axes)
pages = [p for p in self.pages if not p.is_reduced]
series = [Record(axes=axes, shape=shape, pages=pages,
dtype=numpy.dtype(pages[0].dtype))]
if len(pages) != len(self.pages): # reduced RGB pages
pages = [p for p in self.pages if p.is_reduced]
cp = 1
i = 0
while cp < len(pages) and i < len(shape)-2:
cp *= shape[i]
i += 1
shape = shape[:i] + pages[0].shape
axes = axes[:i] + 'CYX'
series.append(Record(axes=axes, shape=shape, pages=pages,
dtype=numpy.dtype(pages[0].dtype)))
elif self.is_imagej:
shape = []
axes = []
ij = page0.imagej_tags
if 'frames' in ij:
shape.append(ij['frames'])
axes.append('T')
if 'slices' in ij:
shape.append(ij['slices'])
axes.append('Z')
if 'channels' in ij and not self.is_rgb:
shape.append(ij['channels'])
axes.append('C')
remain = len(self.pages) // (product(shape) if shape else 1)
if remain > 1:
shape.append(remain)
axes.append('I')
shape.extend(page0.shape)
axes.extend(page0.axes)
axes = ''.join(axes)
series = [Record(pages=self.pages, shape=tuple(shape), axes=axes,
dtype=numpy.dtype(page0.dtype))]
elif self.is_nih:
if len(self.pages) == 1:
shape = page0.shape
axes = page0.axes
else:
shape = (len(self.pages),) + page0.shape
axes = 'I' + page0.axes
series = [Record(pages=self.pages, shape=shape, axes=axes,
dtype=numpy.dtype(page0.dtype))]
elif page0.is_shaped:
# TODO: shaped files can contain multiple series
shape = page0.tags['image_description'].value[7:-1]
shape = tuple(int(i) for i in shape.split(b','))
series = [Record(pages=self.pages, shape=shape,
axes='Q' * len(shape),
dtype=numpy.dtype(page0.dtype))]
# generic detection of series
if not series:
shapes = []
pages = {}
for page in self.pages:
if not page.shape:
continue
shape = page.shape + (page.axes,
page.compression in TIFF_DECOMPESSORS)
if shape not in pages:
shapes.append(shape)
pages[shape] = [page]
else:
pages[shape].append(page)
series = [Record(pages=pages[s],
axes=(('I' + s[-2])
if len(pages[s]) > 1 else s[-2]),
dtype=numpy.dtype(pages[s][0].dtype),
shape=((len(pages[s]), ) + s[:-2]
if len(pages[s]) > 1 else s[:-2]))
for s in shapes]
# remove empty series, e.g. in MD Gel files
series = [s for s in series if sum(s.shape) > 0]
return series
def asarray(self, key=None, series=None, memmap=False):
"""Return image data from multiple TIFF pages as numpy array.
By default the first image series is returned.
Parameters
----------
key : int, slice, or sequence of page indices
Defines which pages to return as array.
series : int
Defines which series of pages to return as array.
memmap : bool
If True, return an array stored in a binary file on disk
if possible.
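        Examples
        --------
        Sketch, assuming 'temp.tif' exists in the current directory:
        >>> with TiffFile('temp.tif') as tif:
        ...     image = tif.asarray(key=0)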
"""
if key is None and series is None:
series = 0
if series is not None:
pages = self.series[series].pages
else:
pages = self.pages
if key is None:
pass
elif isinstance(key, int):
pages = [pages[key]]
elif isinstance(key, slice):
pages = pages[key]
elif isinstance(key, collections.Iterable):
pages = [pages[k] for k in key]
else:
raise TypeError("key must be an int, slice, or sequence")
if not len(pages):
raise ValueError("no pages selected")
if self.is_nih:
if pages[0].is_palette:
result = stack_pages(pages, colormapped=False, squeeze=False)
result = numpy.take(pages[0].color_map, result, axis=1)
result = numpy.swapaxes(result, 0, 1)
else:
result = stack_pages(pages, memmap=memmap,
colormapped=False, squeeze=False)
elif len(pages) == 1:
return pages[0].asarray(memmap=memmap)
elif self.is_ome:
assert not self.is_palette, "color mapping disabled for ome-tiff"
if any(p is None for p in pages):
# zero out missing pages
firstpage = next(p for p in pages if p)
nopage = numpy.zeros_like(
firstpage.asarray(memmap=False))
s = self.series[series]
if memmap:
with tempfile.NamedTemporaryFile() as fh:
result = numpy.memmap(fh, dtype=s.dtype, shape=s.shape)
result = result.reshape(-1)
else:
result = numpy.empty(s.shape, s.dtype).reshape(-1)
index = 0
class KeepOpen:
# keep Tiff files open between consecutive pages
def __init__(self, parent, close):
self.master = parent
self.parent = parent
self._close = close
def open(self, page):
if self._close and page and page.parent != self.parent:
if self.parent != self.master:
self.parent.filehandle.close()
self.parent = page.parent
self.parent.filehandle.open()
def close(self):
if self._close and self.parent != self.master:
self.parent.filehandle.close()
keep = KeepOpen(self, self._multifile_close)
for page in pages:
keep.open(page)
if page:
a = page.asarray(memmap=False, colormapped=False,
reopen=False)
else:
a = nopage
try:
result[index:index + a.size] = a.reshape(-1)
except ValueError as e:
warnings.warn("ome-tiff: %s" % e)
break
index += a.size
keep.close()
else:
result = stack_pages(pages, memmap=memmap)
if key is None:
try:
result.shape = self.series[series].shape
except ValueError:
try:
warnings.warn("failed to reshape %s to %s" % (
result.shape, self.series[series].shape))
# try series of expected shapes
result.shape = (-1,) + self.series[series].shape
except ValueError:
# revert to generic shape
result.shape = (-1,) + pages[0].shape
else:
result.shape = (-1,) + pages[0].shape
return result
def _omeseries(self):
"""Return image series in OME-TIFF file(s)."""
root = etree.fromstring(self.pages[0].tags['image_description'].value)
uuid = root.attrib.get('UUID', None)
self._files = {uuid: self}
dirname = self._fh.dirname
modulo = {}
result = []
for element in root:
if element.tag.endswith('BinaryOnly'):
warnings.warn("ome-xml: not an ome-tiff master file")
break
if element.tag.endswith('StructuredAnnotations'):
for annot in element:
if not annot.attrib.get('Namespace',
'').endswith('modulo'):
continue
for value in annot:
for modul in value:
for along in modul:
if not along.tag[:-1].endswith('Along'):
continue
axis = along.tag[-1]
newaxis = along.attrib.get('Type', 'other')
newaxis = AXES_LABELS[newaxis]
if 'Start' in along.attrib:
labels = range(
int(along.attrib['Start']),
int(along.attrib['End']) + 1,
int(along.attrib.get('Step', 1)))
else:
labels = [label.text for label in along
if label.tag.endswith('Label')]
modulo[axis] = (newaxis, labels)
if not element.tag.endswith('Image'):
continue
for pixels in element:
if not pixels.tag.endswith('Pixels'):
continue
atr = pixels.attrib
dtype = atr.get('Type', None)
axes = ''.join(reversed(atr['DimensionOrder']))
shape = list(int(atr['Size'+ax]) for ax in axes)
size = product(shape[:-2])
ifds = [None] * size
for data in pixels:
if not data.tag.endswith('TiffData'):
continue
atr = data.attrib
ifd = int(atr.get('IFD', 0))
num = int(atr.get('NumPlanes', 1 if 'IFD' in atr else 0))
num = int(atr.get('PlaneCount', num))
idx = [int(atr.get('First'+ax, 0)) for ax in axes[:-2]]
try:
idx = numpy.ravel_multi_index(idx, shape[:-2])
except ValueError:
# ImageJ produces invalid ome-xml when cropping
warnings.warn("ome-xml: invalid TiffData index")
continue
for uuid in data:
if not uuid.tag.endswith('UUID'):
continue
if uuid.text not in self._files:
if not self._multifile:
# abort reading multifile OME series
# and fall back to generic series
return []
fname = uuid.attrib['FileName']
                            try:
                                tif = TiffFile(os.path.join(dirname, fname))
                            except (IOError, ValueError):
                                # TiffFile closes its file handle on failure;
                                # 'tif' would be unbound here, so do not close it
                                warnings.warn(
                                    "ome-xml: failed to read '%s'" % fname)
                                break
self._files[uuid.text] = tif
if self._multifile_close:
tif.close()
pages = self._files[uuid.text].pages
try:
for i in range(num if num else len(pages)):
ifds[idx + i] = pages[ifd + i]
except IndexError:
warnings.warn("ome-xml: index out of range")
# only process first uuid
break
else:
pages = self.pages
try:
for i in range(num if num else len(pages)):
ifds[idx + i] = pages[ifd + i]
except IndexError:
warnings.warn("ome-xml: index out of range")
if all(i is None for i in ifds):
# skip images without data
continue
dtype = next(i for i in ifds if i).dtype
result.append(Record(axes=axes, shape=shape, pages=ifds,
dtype=numpy.dtype(dtype)))
for record in result:
for axis, (newaxis, labels) in modulo.items():
i = record.axes.index(axis)
size = len(labels)
if record.shape[i] == size:
record.axes = record.axes.replace(axis, newaxis, 1)
else:
record.shape[i] //= size
record.shape.insert(i+1, size)
record.axes = record.axes.replace(axis, axis+newaxis, 1)
record.shape = tuple(record.shape)
# squeeze dimensions
for record in result:
record.shape, record.axes = squeeze_axes(record.shape, record.axes)
return result
def __len__(self):
"""Return number of image pages in file."""
return len(self.pages)
def __getitem__(self, key):
"""Return specified page."""
return self.pages[key]
def __iter__(self):
"""Return iterator over pages."""
return iter(self.pages)
def __str__(self):
"""Return string containing information about file."""
result = [
self._fh.name.capitalize(),
format_size(self._fh.size),
{'<': 'little endian', '>': 'big endian'}[self.byteorder]]
if self.is_bigtiff:
result.append("bigtiff")
if len(self.pages) > 1:
result.append("%i pages" % len(self.pages))
if len(self.series) > 1:
result.append("%i series" % len(self.series))
if len(self._files) > 1:
result.append("%i files" % (len(self._files)))
return ", ".join(result)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
@lazyattr
def fstat(self):
try:
return os.fstat(self._fh.fileno())
except Exception: # io.UnsupportedOperation
return None
@lazyattr
def is_bigtiff(self):
return self.offset_size != 4
@lazyattr
def is_rgb(self):
return all(p.is_rgb for p in self.pages)
@lazyattr
def is_palette(self):
return all(p.is_palette for p in self.pages)
@lazyattr
def is_mdgel(self):
return any(p.is_mdgel for p in self.pages)
@lazyattr
def is_mediacy(self):
return any(p.is_mediacy for p in self.pages)
@lazyattr
def is_stk(self):
return all(p.is_stk for p in self.pages)
@lazyattr
def is_lsm(self):
return self.pages[0].is_lsm
@lazyattr
def is_imagej(self):
return self.pages[0].is_imagej
@lazyattr
def is_micromanager(self):
return self.pages[0].is_micromanager
@lazyattr
def is_nih(self):
return self.pages[0].is_nih
@lazyattr
def is_fluoview(self):
return self.pages[0].is_fluoview
@lazyattr
def is_ome(self):
return self.pages[0].is_ome
class TiffPage(object):
"""A TIFF image file directory (IFD).
Attributes
----------
index : int
Index of page in file.
dtype : str {TIFF_SAMPLE_DTYPES}
Data type of image, colormapped if applicable.
shape : tuple
Dimensions of the image array in TIFF page,
colormapped and with one alpha channel if applicable.
axes : str
Axes label codes:
'X' width, 'Y' height, 'S' sample, 'I' image series|page|plane,
'Z' depth, 'C' color|em-wavelength|channel, 'E' ex-wavelength|lambda,
'T' time, 'R' region|tile, 'A' angle, 'P' phase, 'H' lifetime,
'L' exposure, 'V' event, 'Q' unknown, '_' missing
tags : TiffTags
Dictionary of tags in page.
Tag values are also directly accessible as attributes.
color_map : numpy array
Color look up table, if exists.
cz_lsm_scan_info: Record(dict)
LSM scan info attributes, if exists.
imagej_tags: Record(dict)
Consolidated ImageJ description and metadata tags, if exists.
uic_tags: Record(dict)
Consolidated MetaMorph STK/UIC tags, if exists.
All attributes are read-only.
Notes
-----
The internal, normalized '_shape' attribute is 6 dimensional:
0. number planes (stk)
1. planar samples_per_pixel
2. image_depth Z (sgi)
3. image_length Y
4. image_width X
5. contig samples_per_pixel
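    For example, a contiguous RGB image of 301x219 pixels is normalized to
    a '_shape' of (1, 1, 1, 301, 219, 3), while 'shape' is (301, 219, 3)
    with axes 'YXS'.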
"""
def __init__(self, parent):
"""Initialize instance from file."""
self.parent = parent
self.index = len(parent.pages)
self.shape = self._shape = ()
self.dtype = self._dtype = None
self.axes = ""
self.tags = TiffTags()
self._fromfile()
self._process_tags()
def _fromfile(self):
"""Read TIFF IFD structure and its tags from file.
File cursor must be at storage position of IFD offset and is left at
offset to next IFD.
Raises StopIteration if offset (first bytes read) is 0.
"""
fh = self.parent.filehandle
byteorder = self.parent.byteorder
offset_size = self.parent.offset_size
fmt = {4: 'I', 8: 'Q'}[offset_size]
offset = struct.unpack(byteorder + fmt, fh.read(offset_size))[0]
if not offset:
raise StopIteration()
# read standard tags
tags = self.tags
fh.seek(offset)
fmt, size = {4: ('H', 2), 8: ('Q', 8)}[offset_size]
try:
numtags = struct.unpack(byteorder + fmt, fh.read(size))[0]
except Exception:
warnings.warn("corrupted page list")
raise StopIteration()
tagcode = 0
for _ in range(numtags):
try:
tag = TiffTag(self.parent)
# print(tag)
except TiffTag.Error as e:
warnings.warn(str(e))
continue
if tagcode > tag.code:
# expected for early LSM and tifffile versions
warnings.warn("tags are not ordered by code")
tagcode = tag.code
if tag.name not in tags:
tags[tag.name] = tag
else:
# some files contain multiple IFD with same code
# e.g. MicroManager files contain two image_description
                i = 1
                while True:
                    name = "%s_%i" % (tag.name, i)
                    if name not in tags:
                        tags[name] = tag
                        break
                    i += 1
pos = fh.tell()
if self.is_lsm or (self.index and self.parent.is_lsm):
# correct non standard LSM bitspersample tags
self.tags['bits_per_sample']._correct_lsm_bitspersample(self)
if self.is_lsm:
# read LSM info subrecords
for name, reader in CZ_LSM_INFO_READERS.items():
try:
offset = self.cz_lsm_info['offset_'+name]
except KeyError:
continue
if offset < 8:
# older LSM revision
continue
fh.seek(offset)
try:
setattr(self, 'cz_lsm_'+name, reader(fh))
except ValueError:
pass
elif self.is_stk and 'uic1tag' in tags and not tags['uic1tag'].value:
# read uic1tag now that plane count is known
uic1tag = tags['uic1tag']
fh.seek(uic1tag.value_offset)
tags['uic1tag'].value = Record(
read_uic1tag(fh, byteorder, uic1tag.dtype, uic1tag.count,
tags['uic2tag'].count))
fh.seek(pos)
def _process_tags(self):
"""Validate standard tags and initialize attributes.
Raise ValueError if tag values are not supported.
"""
tags = self.tags
for code, (name, default, dtype, count, validate) in TIFF_TAGS.items():
if not (name in tags or default is None):
tags[name] = TiffTag(code, dtype=dtype, count=count,
value=default, name=name)
if name in tags and validate:
try:
if tags[name].count == 1:
setattr(self, name, validate[tags[name].value])
else:
setattr(self, name, tuple(
validate[value] for value in tags[name].value))
except KeyError:
raise ValueError("%s.value (%s) not supported" %
(name, tags[name].value))
tag = tags['bits_per_sample']
if tag.count == 1:
self.bits_per_sample = tag.value
else:
# LSM might list more items than samples_per_pixel
value = tag.value[:self.samples_per_pixel]
if any((v-value[0] for v in value)):
self.bits_per_sample = value
else:
self.bits_per_sample = value[0]
tag = tags['sample_format']
if tag.count == 1:
self.sample_format = TIFF_SAMPLE_FORMATS[tag.value]
else:
value = tag.value[:self.samples_per_pixel]
if any((v-value[0] for v in value)):
self.sample_format = [TIFF_SAMPLE_FORMATS[v] for v in value]
else:
self.sample_format = TIFF_SAMPLE_FORMATS[value[0]]
if 'photometric' not in tags:
self.photometric = None
if 'image_depth' not in tags:
self.image_depth = 1
if 'image_length' in tags:
self.strips_per_image = int(math.floor(
float(self.image_length + self.rows_per_strip - 1) /
self.rows_per_strip))
else:
self.strips_per_image = 0
key = (self.sample_format, self.bits_per_sample)
self.dtype = self._dtype = TIFF_SAMPLE_DTYPES.get(key, None)
if 'image_length' not in self.tags or 'image_width' not in self.tags:
# some GEL file pages are missing image data
self.image_length = 0
self.image_width = 0
self.image_depth = 0
self.strip_offsets = 0
self._shape = ()
self.shape = ()
self.axes = ''
if self.is_palette:
self.dtype = self.tags['color_map'].dtype[1]
self.color_map = numpy.array(self.color_map, self.dtype)
dmax = self.color_map.max()
if dmax < 256:
self.dtype = numpy.uint8
self.color_map = self.color_map.astype(self.dtype)
#else:
# self.dtype = numpy.uint8
# self.color_map >>= 8
# self.color_map = self.color_map.astype(self.dtype)
self.color_map.shape = (3, -1)
# determine shape of data
image_length = self.image_length
image_width = self.image_width
image_depth = self.image_depth
samples_per_pixel = self.samples_per_pixel
if self.is_stk:
assert self.image_depth == 1
planes = self.tags['uic2tag'].count
if self.is_contig:
self._shape = (planes, 1, 1, image_length, image_width,
samples_per_pixel)
if samples_per_pixel == 1:
self.shape = (planes, image_length, image_width)
self.axes = 'YX'
else:
self.shape = (planes, image_length, image_width,
samples_per_pixel)
self.axes = 'YXS'
else:
self._shape = (planes, samples_per_pixel, 1, image_length,
image_width, 1)
if samples_per_pixel == 1:
self.shape = (planes, image_length, image_width)
self.axes = 'YX'
else:
self.shape = (planes, samples_per_pixel, image_length,
image_width)
self.axes = 'SYX'
# detect type of series
if planes == 1:
self.shape = self.shape[1:]
elif numpy.all(self.uic2tag.z_distance != 0):
self.axes = 'Z' + self.axes
elif numpy.all(numpy.diff(self.uic2tag.time_created) != 0):
self.axes = 'T' + self.axes
else:
self.axes = 'I' + self.axes
# DISABLED
if self.is_palette:
assert False, "color mapping disabled for stk"
if self.color_map.shape[1] >= 2**self.bits_per_sample:
if image_depth == 1:
self.shape = (3, planes, image_length, image_width)
else:
self.shape = (3, planes, image_depth, image_length,
image_width)
self.axes = 'C' + self.axes
else:
warnings.warn("palette cannot be applied")
self.is_palette = False
elif self.is_palette:
samples = 1
if 'extra_samples' in self.tags:
samples += len(self.extra_samples)
if self.is_contig:
self._shape = (1, 1, image_depth, image_length, image_width,
samples)
else:
self._shape = (1, samples, image_depth, image_length,
image_width, 1)
if self.color_map.shape[1] >= 2**self.bits_per_sample:
if image_depth == 1:
self.shape = (3, image_length, image_width)
self.axes = 'CYX'
else:
self.shape = (3, image_depth, image_length, image_width)
self.axes = 'CZYX'
else:
warnings.warn("palette cannot be applied")
self.is_palette = False
if image_depth == 1:
self.shape = (image_length, image_width)
self.axes = 'YX'
else:
self.shape = (image_depth, image_length, image_width)
self.axes = 'ZYX'
elif self.is_rgb or samples_per_pixel > 1:
if self.is_contig:
self._shape = (1, 1, image_depth, image_length, image_width,
samples_per_pixel)
if image_depth == 1:
self.shape = (image_length, image_width, samples_per_pixel)
self.axes = 'YXS'
else:
self.shape = (image_depth, image_length, image_width,
samples_per_pixel)
self.axes = 'ZYXS'
else:
self._shape = (1, samples_per_pixel, image_depth,
image_length, image_width, 1)
if image_depth == 1:
self.shape = (samples_per_pixel, image_length, image_width)
self.axes = 'SYX'
else:
self.shape = (samples_per_pixel, image_depth,
image_length, image_width)
self.axes = 'SZYX'
if False and self.is_rgb and 'extra_samples' in self.tags:
# DISABLED: only use RGB and first alpha channel if exists
extra_samples = self.extra_samples
if self.tags['extra_samples'].count == 1:
extra_samples = (extra_samples, )
for exs in extra_samples:
if exs in ('unassalpha', 'assocalpha', 'unspecified'):
if self.is_contig:
self.shape = self.shape[:-1] + (4,)
else:
self.shape = (4,) + self.shape[1:]
break
else:
self._shape = (1, 1, image_depth, image_length, image_width, 1)
if image_depth == 1:
self.shape = (image_length, image_width)
self.axes = 'YX'
else:
self.shape = (image_depth, image_length, image_width)
self.axes = 'ZYX'
if not self.compression and 'strip_byte_counts' not in tags:
self.strip_byte_counts = (
product(self.shape) * (self.bits_per_sample // 8), )
assert len(self.shape) == len(self.axes)
def asarray(self, squeeze=True, colormapped=True, rgbonly=False,
scale_mdgel=False, memmap=False, reopen=True):
"""Read image data from file and return as numpy array.
Raise ValueError if format is unsupported.
If any of 'squeeze', 'colormapped', or 'rgbonly' are not the default,
the shape of the returned array might be different from the page shape.
Parameters
----------
squeeze : bool
If True, all length-1 dimensions (except X and Y) are
squeezed out from result.
colormapped : bool
If True, color mapping is applied for palette-indexed images.
rgbonly : bool
If True, return RGB(A) image without additional extra samples.
memmap : bool
If True, use numpy.memmap to read arrays from file if possible.
            For use on 64-bit systems and files with large contiguous data.
reopen : bool
If True and the parent file handle is closed, the file is
temporarily re-opened (and closed if no exception occurs).
scale_mdgel : bool
If True, MD Gel data will be scaled according to the private
metadata in the second TIFF page. The dtype will be float32.
"""
if not self._shape:
return
if self.dtype is None:
raise ValueError("data type not supported: %s%i" % (
self.sample_format, self.bits_per_sample))
if self.compression not in TIFF_DECOMPESSORS:
raise ValueError("cannot decompress %s" % self.compression)
tag = self.tags['sample_format']
if tag.count != 1 and any((i-tag.value[0] for i in tag.value)):
raise ValueError("sample formats don't match %s" % str(tag.value))
fh = self.parent.filehandle
closed = fh.closed
if closed:
if reopen:
fh.open()
else:
raise IOError("file handle is closed")
dtype = self._dtype
shape = self._shape
image_width = self.image_width
image_length = self.image_length
image_depth = self.image_depth
typecode = self.parent.byteorder + dtype
bits_per_sample = self.bits_per_sample
if self.is_tiled:
if 'tile_offsets' in self.tags:
byte_counts = self.tile_byte_counts
offsets = self.tile_offsets
else:
byte_counts = self.strip_byte_counts
offsets = self.strip_offsets
tile_width = self.tile_width
tile_length = self.tile_length
tile_depth = self.tile_depth if 'tile_depth' in self.tags else 1
tw = (image_width + tile_width - 1) // tile_width
tl = (image_length + tile_length - 1) // tile_length
td = (image_depth + tile_depth - 1) // tile_depth
shape = (shape[0], shape[1],
td*tile_depth, tl*tile_length, tw*tile_width, shape[-1])
tile_shape = (tile_depth, tile_length, tile_width, shape[-1])
runlen = tile_width
else:
byte_counts = self.strip_byte_counts
offsets = self.strip_offsets
runlen = image_width
if any(o < 2 for o in offsets):
raise ValueError("corrupted page")
if memmap and self._is_memmappable(rgbonly, colormapped):
result = fh.memmap_array(typecode, shape, offset=offsets[0])
elif self.is_contiguous:
fh.seek(offsets[0])
result = fh.read_array(typecode, product(shape))
result = result.astype('=' + dtype)
else:
if self.is_contig:
runlen *= self.samples_per_pixel
if bits_per_sample in (8, 16, 32, 64, 128):
if (bits_per_sample * runlen) % 8:
raise ValueError("data and sample size mismatch")
def unpack(x):
try:
return numpy.fromstring(x, typecode)
except ValueError as e:
# strips may be missing EOI
warnings.warn("unpack: %s" % e)
xlen = ((len(x) // (bits_per_sample // 8))
* (bits_per_sample // 8))
return numpy.fromstring(x[:xlen], typecode)
elif isinstance(bits_per_sample, tuple):
def unpack(x):
return unpackrgb(x, typecode, bits_per_sample)
else:
def unpack(x):
return unpackints(x, typecode, bits_per_sample, runlen)
decompress = TIFF_DECOMPESSORS[self.compression]
if self.compression == 'jpeg':
table = self.jpeg_tables if 'jpeg_tables' in self.tags else b''
decompress = lambda x: decodejpg(x, table, self.photometric)
if self.is_tiled:
result = numpy.empty(shape, dtype)
tw, tl, td, pl = 0, 0, 0, 0
for offset, bytecount in zip(offsets, byte_counts):
fh.seek(offset)
tile = unpack(decompress(fh.read(bytecount)))
tile.shape = tile_shape
if self.predictor == 'horizontal':
numpy.cumsum(tile, axis=-2, dtype=dtype, out=tile)
result[0, pl, td:td+tile_depth,
tl:tl+tile_length, tw:tw+tile_width, :] = tile
del tile
tw += tile_width
if tw >= shape[4]:
tw, tl = 0, tl + tile_length
if tl >= shape[3]:
tl, td = 0, td + tile_depth
if td >= shape[2]:
td, pl = 0, pl + 1
result = result[...,
:image_depth, :image_length, :image_width, :]
else:
strip_size = (self.rows_per_strip * self.image_width *
self.samples_per_pixel)
result = numpy.empty(shape, dtype).reshape(-1)
index = 0
for offset, bytecount in zip(offsets, byte_counts):
fh.seek(offset)
strip = fh.read(bytecount)
strip = decompress(strip)
strip = unpack(strip)
size = min(result.size, strip.size, strip_size,
result.size - index)
result[index:index+size] = strip[:size]
del strip
index += size
result.shape = self._shape
if self.predictor == 'horizontal' and not (self.is_tiled and not
self.is_contiguous):
# work around bug in LSM510 software
if not (self.parent.is_lsm and not self.compression):
numpy.cumsum(result, axis=-2, dtype=dtype, out=result)
if colormapped and self.is_palette:
if self.color_map.shape[1] >= 2**bits_per_sample:
# FluoView and LSM might fail here
result = numpy.take(self.color_map,
result[:, 0, :, :, :, 0], axis=1)
elif rgbonly and self.is_rgb and 'extra_samples' in self.tags:
# return only RGB and first alpha channel if exists
extra_samples = self.extra_samples
if self.tags['extra_samples'].count == 1:
extra_samples = (extra_samples, )
for i, exs in enumerate(extra_samples):
if exs in ('unassalpha', 'assocalpha', 'unspecified'):
if self.is_contig:
result = result[..., [0, 1, 2, 3+i]]
else:
result = result[:, [0, 1, 2, 3+i]]
break
else:
if self.is_contig:
result = result[..., :3]
else:
result = result[:, :3]
if squeeze:
try:
result.shape = self.shape
except ValueError:
warnings.warn("failed to reshape from %s to %s" % (
str(result.shape), str(self.shape)))
if scale_mdgel and self.parent.is_mdgel:
# MD Gel stores private metadata in the second page
tags = self.parent.pages[1]
if tags.md_file_tag in (2, 128):
scale = tags.md_scale_pixel
scale = scale[0] / scale[1] # rational
result = result.astype('float32')
if tags.md_file_tag == 2:
                result **= 2  # square root data format
result *= scale
if closed:
# TODO: file remains open if an exception occurred above
fh.close()
return result
def _is_memmappable(self, rgbonly, colormapped):
"""Return if image data in file can be memory mapped."""
if not self.parent.filehandle.is_file or not self.is_contiguous:
return False
return not (self.predictor or
(rgbonly and 'extra_samples' in self.tags) or
(colormapped and self.is_palette) or
({'big': '>', 'little': '<'}[sys.byteorder] !=
self.parent.byteorder))
@lazyattr
def is_contiguous(self):
"""Return offset and size of contiguous data, else None.
Excludes prediction and colormapping.
"""
if self.compression or self.bits_per_sample not in (8, 16, 32, 64):
return
if self.is_tiled:
if (self.image_width != self.tile_width or
self.image_length % self.tile_length or
self.tile_width % 16 or self.tile_length % 16):
return
if ('image_depth' in self.tags and 'tile_depth' in self.tags and
(self.image_length != self.tile_length or
self.image_depth % self.tile_depth)):
return
offsets = self.tile_offsets
byte_counts = self.tile_byte_counts
else:
offsets = self.strip_offsets
byte_counts = self.strip_byte_counts
if len(offsets) == 1:
return offsets[0], byte_counts[0]
if self.is_stk or all(offsets[i] + byte_counts[i] == offsets[i+1]
or byte_counts[i+1] == 0 # no data/ignore offset
for i in range(len(offsets)-1)):
return offsets[0], sum(byte_counts)
def __str__(self):
"""Return string containing information about page."""
s = ', '.join(s for s in (
' x '.join(str(i) for i in self.shape),
str(numpy.dtype(self.dtype)),
'%s bit' % str(self.bits_per_sample),
self.photometric if 'photometric' in self.tags else '',
self.compression if self.compression else 'raw',
'|'.join(t[3:] for t in (
'is_stk', 'is_lsm', 'is_nih', 'is_ome', 'is_imagej',
'is_micromanager', 'is_fluoview', 'is_mdgel', 'is_mediacy',
'is_sgi', 'is_reduced', 'is_tiled',
'is_contiguous') if getattr(self, t))) if s)
return "Page %i: %s" % (self.index, s)
def __getattr__(self, name):
"""Return tag value."""
if name in self.tags:
value = self.tags[name].value
setattr(self, name, value)
return value
raise AttributeError(name)
@lazyattr
def uic_tags(self):
"""Consolidate UIC tags."""
if not self.is_stk:
raise AttributeError("uic_tags")
tags = self.tags
result = Record()
result.number_planes = tags['uic2tag'].count
if 'image_description' in tags:
result.plane_descriptions = self.image_description.split(b'\x00')
if 'uic1tag' in tags:
result.update(tags['uic1tag'].value)
if 'uic3tag' in tags:
result.update(tags['uic3tag'].value) # wavelengths
if 'uic4tag' in tags:
result.update(tags['uic4tag'].value) # override uic1 tags
uic2tag = tags['uic2tag'].value
result.z_distance = uic2tag.z_distance
result.time_created = uic2tag.time_created
result.time_modified = uic2tag.time_modified
try:
result.datetime_created = [
julian_datetime(*dt) for dt in
zip(uic2tag.date_created, uic2tag.time_created)]
result.datetime_modified = [
julian_datetime(*dt) for dt in
zip(uic2tag.date_modified, uic2tag.time_modified)]
except ValueError as e:
warnings.warn("uic_tags: %s" % e)
return result
@lazyattr
def imagej_tags(self):
"""Consolidate ImageJ metadata."""
if not self.is_imagej:
raise AttributeError("imagej_tags")
tags = self.tags
if 'image_description_1' in tags:
# MicroManager
result = imagej_description(tags['image_description_1'].value)
else:
result = imagej_description(tags['image_description'].value)
if 'imagej_metadata' in tags:
try:
result.update(imagej_metadata(
tags['imagej_metadata'].value,
tags['imagej_byte_counts'].value,
self.parent.byteorder))
except Exception as e:
warnings.warn(str(e))
return Record(result)
@lazyattr
def is_rgb(self):
"""True if page contains a RGB image."""
return ('photometric' in self.tags and
self.tags['photometric'].value == 2)
@lazyattr
def is_contig(self):
"""True if page contains a contiguous image."""
return ('planar_configuration' in self.tags and
self.tags['planar_configuration'].value == 1)
@lazyattr
def is_palette(self):
"""True if page contains a palette-colored image and not OME or STK."""
try:
# turn off color mapping for OME-TIFF and STK
if self.is_stk or self.is_ome or self.parent.is_ome:
return False
except IndexError:
pass # OME-XML not found in first page
return ('photometric' in self.tags and
self.tags['photometric'].value == 3)
@lazyattr
def is_tiled(self):
"""True if page contains tiled image."""
return 'tile_width' in self.tags
@lazyattr
def is_reduced(self):
"""True if page is a reduced image of another image."""
return bool(self.tags['new_subfile_type'].value & 1)
@lazyattr
def is_mdgel(self):
"""True if page contains md_file_tag tag."""
return 'md_file_tag' in self.tags
@lazyattr
def is_mediacy(self):
"""True if page contains Media Cybernetics Id tag."""
return ('mc_id' in self.tags and
self.tags['mc_id'].value.startswith(b'MC TIFF'))
@lazyattr
def is_stk(self):
"""True if page contains UIC2Tag tag."""
return 'uic2tag' in self.tags
@lazyattr
def is_lsm(self):
"""True if page contains LSM CZ_LSM_INFO tag."""
return 'cz_lsm_info' in self.tags
@lazyattr
def is_fluoview(self):
"""True if page contains FluoView MM_STAMP tag."""
return 'mm_stamp' in self.tags
@lazyattr
def is_nih(self):
"""True if page contains NIH image header."""
return 'nih_image_header' in self.tags
@lazyattr
def is_sgi(self):
"""True if page contains SGI image and tile depth tags."""
return 'image_depth' in self.tags and 'tile_depth' in self.tags
@lazyattr
def is_ome(self):
"""True if page contains OME-XML in image_description tag."""
return ('image_description' in self.tags and self.tags[
'image_description'].value.startswith(b'<?xml version='))
@lazyattr
def is_shaped(self):
"""True if page contains shape in image_description tag."""
return ('image_description' in self.tags and self.tags[
'image_description'].value.startswith(b'shape=('))
@lazyattr
def is_imagej(self):
"""True if page contains ImageJ description."""
return (
('image_description' in self.tags and
self.tags['image_description'].value.startswith(b'ImageJ=')) or
('image_description_1' in self.tags and # Micromanager
self.tags['image_description_1'].value.startswith(b'ImageJ=')))
@lazyattr
def is_micromanager(self):
"""True if page contains Micro-Manager metadata."""
return 'micromanager_metadata' in self.tags
class TiffTag(object):
"""A TIFF tag structure.
Attributes
----------
name : string
Attribute name of tag.
code : int
Decimal code of tag.
dtype : str
Datatype of tag data. One of TIFF_DATA_TYPES.
count : int
Number of values.
value : various types
Tag data as Python object.
value_offset : int
Location of value in file, if any.
All attributes are read-only.
"""
__slots__ = ('code', 'name', 'count', 'dtype', 'value', 'value_offset',
'_offset', '_value', '_type')
class Error(Exception):
pass
def __init__(self, arg, **kwargs):
"""Initialize instance from file or arguments."""
self._offset = None
if hasattr(arg, '_fh'):
self._fromfile(arg, **kwargs)
else:
self._fromdata(arg, **kwargs)
def _fromdata(self, code, dtype, count, value, name=None):
"""Initialize instance from arguments."""
self.code = int(code)
self.name = name if name else str(code)
self.dtype = TIFF_DATA_TYPES[dtype]
self.count = int(count)
self.value = value
self._value = value
self._type = dtype
def _fromfile(self, parent):
"""Read tag structure from open file. Advance file cursor."""
fh = parent.filehandle
byteorder = parent.byteorder
self._offset = fh.tell()
self.value_offset = self._offset + parent.offset_size + 4
fmt, size = {4: ('HHI4s', 12), 8: ('HHQ8s', 20)}[parent.offset_size]
data = fh.read(size)
code, dtype = struct.unpack(byteorder + fmt[:2], data[:4])
count, value = struct.unpack(byteorder + fmt[2:], data[4:])
self._value = value
self._type = dtype
if code in TIFF_TAGS:
name = TIFF_TAGS[code][0]
elif code in CUSTOM_TAGS:
name = CUSTOM_TAGS[code][0]
else:
name = str(code)
try:
dtype = TIFF_DATA_TYPES[self._type]
except KeyError:
raise TiffTag.Error("unknown tag data type %i" % self._type)
fmt = '%s%i%s' % (byteorder, count*int(dtype[0]), dtype[1])
size = struct.calcsize(fmt)
if size > parent.offset_size or code in CUSTOM_TAGS:
pos = fh.tell()
tof = {4: 'I', 8: 'Q'}[parent.offset_size]
self.value_offset = offset = struct.unpack(byteorder+tof, value)[0]
if offset < 0 or offset > parent.filehandle.size:
raise TiffTag.Error("corrupt file - invalid tag value offset")
elif offset < 4:
raise TiffTag.Error("corrupt value offset for tag %i" % code)
fh.seek(offset)
if code in CUSTOM_TAGS:
readfunc = CUSTOM_TAGS[code][1]
value = readfunc(fh, byteorder, dtype, count)
if isinstance(value, dict): # numpy.core.records.record
value = Record(value)
elif code in TIFF_TAGS or dtype[-1] == 's':
value = struct.unpack(fmt, fh.read(size))
else:
value = read_numpy(fh, byteorder, dtype, count)
fh.seek(pos)
else:
value = struct.unpack(fmt, value[:size])
if code not in CUSTOM_TAGS and code not in (273, 279, 324, 325):
# scalar value if not strip/tile offsets/byte_counts
if len(value) == 1:
value = value[0]
if (dtype.endswith('s') and isinstance(value, bytes)
and self._type != 7):
# TIFF ASCII fields can contain multiple strings,
# each terminated with a NUL
value = stripascii(value)
self.code = code
self.name = name
self.dtype = dtype
self.count = count
self.value = value
def _correct_lsm_bitspersample(self, parent):
"""Correct LSM bitspersample tag.
Old LSM writers may store the two 16-bit values in a separate region,
even though they fit into the tag's value field.
"""
if self.code == 258 and self.count == 2:
# TODO: test this. Need example file.
warnings.warn("correcting LSM bitspersample tag")
fh = parent.filehandle
tof = {4: '<I', 8: '<Q'}[parent.offset_size]
self.value_offset = struct.unpack(tof, self._value)[0]
fh.seek(self.value_offset)
self.value = struct.unpack("<HH", fh.read(4))
def as_str(self):
"""Return value as human readable string."""
return ((str(self.value).split('\n', 1)[0]) if (self._type != 7)
else '<undefined>')
def __str__(self):
"""Return string containing information about tag."""
return ' '.join(str(getattr(self, s)) for s in self.__slots__)
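# A minimal usage sketch (not from a real file): construct a TiffTag directly
# from values via keyword arguments. Tag code 256 is the standard TIFF
# 'image_width' tag and data type 4 is LONG; the value 640 is arbitrary.
#
#   tag = TiffTag(256, dtype=4, count=1, value=640, name='image_width')
#   tag.code, tag.dtype, tag.value   # (256, '1I', 640)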
class TiffSequence(object):
"""Sequence of image files.
The data shape and dtype of all files must match.
Attributes
----------
files : list
List of file names.
shape : tuple
Shape of image sequence.
axes : str
Labels of axes in shape.
Examples
--------
>>> tifs = TiffSequence("test.oif.files/*.tif")
>>> tifs.shape, tifs.axes
((2, 100), 'CT')
>>> data = tifs.asarray()
>>> data.shape
(2, 100, 256, 256)
"""
_patterns = {
'axes': r"""
# matches Olympus OIF and Leica TIFF series
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
"""}
class ParseError(Exception):
pass
def __init__(self, files, imread=TiffFile, pattern='axes',
*args, **kwargs):
"""Initialize instance from multiple files.
Parameters
----------
files : str, or sequence of str
Glob pattern or sequence of file names.
imread : function or class
Image read function or class with asarray function returning numpy
array from single file.
pattern : str
Regular expression pattern that matches axes names and sequence
indices in file names.
By default this matches Olympus OIF and Leica TIFF series.
"""
if isinstance(files, basestring):
files = natural_sorted(glob.glob(files))
files = list(files)
if not files:
raise ValueError("no files found")
#if not os.path.isfile(files[0]):
# raise ValueError("file not found")
self.files = files
if hasattr(imread, 'asarray'):
# redefine imread
_imread = imread
def imread(fname, *args, **kwargs):
with _imread(fname) as im:
return im.asarray(*args, **kwargs)
self.imread = imread
self.pattern = self._patterns.get(pattern, pattern)
try:
self._parse()
if not self.axes:
self.axes = 'I'
except self.ParseError:
self.axes = 'I'
self.shape = (len(files),)
self._start_index = (0,)
self._indices = tuple((i,) for i in range(len(files)))
def __str__(self):
"""Return string with information about image sequence."""
return "\n".join([
self.files[0],
'* files: %i' % len(self.files),
'* axes: %s' % self.axes,
'* shape: %s' % str(self.shape)])
def __len__(self):
return len(self.files)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
pass
def asarray(self, memmap=False, *args, **kwargs):
"""Read image data from all files and return as single numpy array.
If memmap is True, return an array stored in a binary file on disk.
The args and kwargs parameters are passed to the imread function.
Raise IndexError or ValueError if image shapes don't match.
"""
im = self.imread(self.files[0], *args, **kwargs)
shape = self.shape + im.shape
if memmap:
with tempfile.NamedTemporaryFile() as fh:
result = numpy.memmap(fh, dtype=im.dtype, shape=shape)
else:
result = numpy.zeros(shape, dtype=im.dtype)
result = result.reshape(-1, *im.shape)
for index, fname in zip(self._indices, self.files):
index = [i-j for i, j in zip(index, self._start_index)]
index = numpy.ravel_multi_index(index, self.shape)
im = self.imread(fname, *args, **kwargs)
result[index] = im
result.shape = shape
return result
def _parse(self):
"""Get axes and shape from file names."""
if not self.pattern:
raise self.ParseError("invalid pattern")
pattern = re.compile(self.pattern, re.IGNORECASE | re.VERBOSE)
matches = pattern.findall(self.files[0])
if not matches:
raise self.ParseError("pattern doesn't match file names")
matches = matches[-1]
if len(matches) % 2:
raise self.ParseError("pattern doesn't match axis name and index")
axes = ''.join(m for m in matches[::2] if m)
if not axes:
raise self.ParseError("pattern doesn't match file names")
indices = []
for fname in self.files:
matches = pattern.findall(fname)[-1]
if axes != ''.join(m for m in matches[::2] if m):
raise ValueError("axes don't match within the image sequence")
indices.append([int(m) for m in matches[1::2] if m])
shape = tuple(numpy.max(indices, axis=0))
start_index = tuple(numpy.min(indices, axis=0))
shape = tuple(i-j+1 for i, j in zip(shape, start_index))
if product(shape) != len(self.files):
warnings.warn("files are missing. Missing data are zeroed")
self.axes = axes.upper()
self.shape = shape
self._indices = indices
self._start_index = start_index
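# A usage sketch with hypothetical file names: the default 'axes' pattern
# extracts axis letters and indices from names such as 'img_C001_T0002.tif',
# so a series of such files yields axes 'CT' and a shape derived from the
# observed index ranges.
#
#   tifs = TiffSequence('img_C*_T*.tif')   # glob over hypothetical files
#   tifs.axes                              # 'CT'
#   data = tifs.asarray()                  # shape = tifs.shape + image shape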
class Record(dict):
"""Dictionary with attribute access.
Can also be initialized with numpy.core.records.record.
"""
__slots__ = ()
def __init__(self, arg=None, **kwargs):
if kwargs:
arg = kwargs
elif arg is None:
arg = {}
try:
dict.__init__(self, arg)
except (TypeError, ValueError):
for i, name in enumerate(arg.dtype.names):
v = arg[i]
self[name] = v if v.dtype.char != 'S' else stripnull(v)
def __getattr__(self, name):
return self[name]
def __setattr__(self, name, value):
self.__setitem__(name, value)
def __str__(self):
"""Pretty print Record."""
s = []
lists = []
for k in sorted(self):
try:
if k.startswith('_'): # does not work with byte
continue
except AttributeError:
pass
v = self[k]
if isinstance(v, (list, tuple)) and len(v):
if isinstance(v[0], Record):
lists.append((k, v))
continue
elif isinstance(v[0], TiffPage):
v = [i.index for i in v if i]
s.append(
("* %s: %s" % (k, str(v))).split("\n", 1)[0]
[:PRINT_LINE_LEN].rstrip())
for k, v in lists:
l = []
for i, w in enumerate(v):
l.append("* %s[%i]\n %s" % (k, i,
str(w).replace("\n", "\n ")))
s.append('\n'.join(l))
return '\n'.join(s)
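# A minimal usage sketch: Record is a dict whose items are also reachable as
# attributes (the keys used here are arbitrary).
#
#   r = Record(width=640, height=480)
#   r.width        # 640
#   r['height']    # 480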
class TiffTags(Record):
"""Dictionary of TiffTag with attribute access."""
def __str__(self):
"""Return string with information about all tags."""
s = []
for tag in sorted(self.values(), key=lambda x: x.code):
typecode = "%i%s" % (tag.count * int(tag.dtype[0]), tag.dtype[1])
line = "* %i %s (%s) %s" % (
tag.code, tag.name, typecode, tag.as_str())
s.append(line[:PRINT_LINE_LEN].lstrip())
return '\n'.join(s)
class FileHandle(object):
"""Binary file handle.
* Handle embedded files (for CZI within CZI files).
* Allow re-opening closed files (for multi-file formats such as OME-TIFF).
* Read numpy arrays and records from file-like objects.
Only binary read, seek, tell, and close are supported on embedded files.
When initialized from another file handle, do not use that handle until this
FileHandle is closed.
Attributes
----------
name : str
Name of the file.
path : str
Absolute path to file.
size : int
Size of file in bytes.
is_file : bool
If True, the file has a fileno and can be memory-mapped.
All attributes are read-only.
"""
__slots__ = ('_fh', '_arg', '_mode', '_name', '_dir',
'_offset', '_size', '_close', 'is_file')
def __init__(self, arg, mode='rb', name=None, offset=None, size=None):
"""Initialize file handle from file name or another file handle.
Parameters
----------
arg : str, File, or FileHandle
File name or open file handle.
mode : str
File open mode in case 'arg' is a file name.
name : str
Optional name of file in case 'arg' is a file handle.
offset : int
Optional start position of embedded file. By default this is
the current file position.
size : int
Optional size of embedded file. By default this is the number
of bytes from the 'offset' to the end of the file.
"""
self._fh = None
self._arg = arg
self._mode = mode
self._name = name
self._dir = ''
self._offset = offset
self._size = size
self._close = True
self.is_file = False
self.open()
def open(self):
"""Open or re-open file."""
if self._fh:
return # file is open
if isinstance(self._arg, basestring):
# file name
self._arg = os.path.abspath(self._arg)
self._dir, self._name = os.path.split(self._arg)
self._fh = open(self._arg, self._mode)
self._close = True
if self._offset is None:
self._offset = 0
elif isinstance(self._arg, FileHandle):
# FileHandle
self._fh = self._arg._fh
if self._offset is None:
self._offset = 0
self._offset += self._arg._offset
self._close = False
if not self._name:
if self._offset:
name, ext = os.path.splitext(self._arg._name)
self._name = "%s@%i%s" % (name, self._offset, ext)
else:
self._name = self._arg._name
self._dir = self._arg._dir
else:
# open file object
self._fh = self._arg
if self._offset is None:
self._offset = self._arg.tell()
self._close = False
if not self._name:
try:
self._dir, self._name = os.path.split(self._fh.name)
except AttributeError:
self._name = "Unnamed stream"
if self._offset:
self._fh.seek(self._offset)
if self._size is None:
pos = self._fh.tell()
self._fh.seek(self._offset, 2)
self._size = self._fh.tell()
self._fh.seek(pos)
try:
self._fh.fileno()
self.is_file = True
except Exception:
self.is_file = False
def read(self, size=-1):
"""Read 'size' bytes from file, or until EOF is reached."""
if size < 0 and self._offset:
size = self._size
return self._fh.read(size)
def memmap_array(self, dtype, shape, offset=0, mode='r', order='C'):
"""Return numpy.memmap of data stored in file."""
if not self.is_file:
raise ValueError("Can not memory map file without fileno.")
return numpy.memmap(self._fh, dtype=dtype, mode=mode,
offset=self._offset + offset,
shape=shape, order=order)
def read_array(self, dtype, count=-1, sep=""):
"""Return numpy array from file.
Work around numpy issue #2230, "numpy.fromfile does not accept
StringIO object" https://github.com/numpy/numpy/issues/2230.
"""
try:
return numpy.fromfile(self._fh, dtype, count, sep)
except IOError:
if count < 0:
size = self._size
else:
size = count * numpy.dtype(dtype).itemsize
data = self._fh.read(size)
return numpy.fromstring(data, dtype, count, sep)
def read_record(self, dtype, shape=1, byteorder=None):
"""Return numpy record from file."""
try:
rec = numpy.rec.fromfile(self._fh, dtype, shape,
byteorder=byteorder)
except Exception:
dtype = numpy.dtype(dtype)
if shape is None:
shape = self._size // dtype.itemsize
size = product(sequence(shape)) * dtype.itemsize
data = self._fh.read(size)
return numpy.rec.fromstring(data, dtype, shape,
byteorder=byteorder)
return rec[0] if shape == 1 else rec
def tell(self):
"""Return file's current position."""
return self._fh.tell() - self._offset
def seek(self, offset, whence=0):
"""Set file's current position."""
if self._offset:
if whence == 0:
self._fh.seek(self._offset + offset, whence)
return
elif whence == 2:
self._fh.seek(self._offset + self._size + offset, 0)
return
self._fh.seek(offset, whence)
def close(self):
"""Close file."""
if self._close and self._fh:
self._fh.close()
self._fh = None
self.is_file = False
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def __getattr__(self, name):
"""Return attribute from underlying file object."""
if self._offset:
warnings.warn(
"FileHandle: '%s' not implemented for embedded files" % name)
return getattr(self._fh, name)
@property
def name(self):
return self._name
@property
def dirname(self):
return self._dir
@property
def path(self):
return os.path.join(self._dir, self._name)
@property
def size(self):
return self._size
@property
def closed(self):
return self._fh is None
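# A usage sketch, assuming a hypothetical file 'example.bin' exists: open it
# directly, then re-open a 128 byte embedded region starting at byte 16.
# Offsets passed to seek/tell on the embedded handle are relative to byte 16.
#
#   with FileHandle('example.bin') as fh:
#       header = fh.read(8)
#   with FileHandle('example.bin') as outer:
#       embedded = FileHandle(outer, offset=16, size=128)
#       embedded.seek(0)         # positions the underlying file at byte 16
#       chunk = embedded.read()  # reads at most 128 bytes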
def read_bytes(fh, byteorder, dtype, count):
"""Read tag data from file and return as byte string."""
dtype = 'b' if dtype[-1] == 's' else byteorder+dtype[-1]
return fh.read_array(dtype, count).tostring()
def read_numpy(fh, byteorder, dtype, count):
"""Read tag data from file and return as numpy array."""
dtype = 'b' if dtype[-1] == 's' else byteorder+dtype[-1]
return fh.read_array(dtype, count)
def read_json(fh, byteorder, dtype, count):
"""Read JSON tag data from file and return as object."""
data = fh.read(count)
try:
return json.loads(unicode(stripnull(data), 'utf-8'))
except ValueError:
warnings.warn("invalid JSON `%s`" % data)
def read_mm_header(fh, byteorder, dtype, count):
"""Read MM_HEADER tag from file and return as numpy.rec.array."""
return fh.read_record(MM_HEADER, byteorder=byteorder)
def read_mm_stamp(fh, byteorder, dtype, count):
"""Read MM_STAMP tag from file and return as numpy.array."""
return fh.read_array(byteorder+'f8', 8)
def read_uic1tag(fh, byteorder, dtype, count, plane_count=None):
"""Read MetaMorph STK UIC1Tag from file and return as dictionary.
Return empty dictionary if plane_count is unknown.
"""
assert dtype in ('2I', '1I') and byteorder == '<'
result = {}
if dtype == '2I':
# pre MetaMorph 2.5 (not tested)
values = fh.read_array('<u4', 2*count).reshape(count, 2)
result = {'z_distance': values[:, 0] / values[:, 1]}
elif plane_count:
for i in range(count):
tagid = struct.unpack('<I', fh.read(4))[0]
if tagid in (28, 29, 37, 40, 41):
# silently skip unexpected tags
fh.read(4)
continue
name, value = read_uic_tag(fh, tagid, plane_count, offset=True)
result[name] = value
return result
def read_uic2tag(fh, byteorder, dtype, plane_count):
"""Read MetaMorph STK UIC2Tag from file and return as dictionary."""
assert dtype == '2I' and byteorder == '<'
values = fh.read_array('<u4', 6*plane_count).reshape(plane_count, 6)
return {
'z_distance': values[:, 0] / values[:, 1],
'date_created': values[:, 2], # julian days
'time_created': values[:, 3], # milliseconds
'date_modified': values[:, 4], # julian days
'time_modified': values[:, 5], # milliseconds
}
def read_uic3tag(fh, byteorder, dtype, plane_count):
"""Read MetaMorph STK UIC3Tag from file and return as dictionary."""
assert dtype == '2I' and byteorder == '<'
values = fh.read_array('<u4', 2*plane_count).reshape(plane_count, 2)
return {'wavelengths': values[:, 0] / values[:, 1]}
def read_uic4tag(fh, byteorder, dtype, plane_count):
"""Read MetaMorph STK UIC4Tag from file and return as dictionary."""
assert dtype == '1I' and byteorder == '<'
result = {}
while True:
tagid = struct.unpack('<H', fh.read(2))[0]
if tagid == 0:
break
name, value = read_uic_tag(fh, tagid, plane_count, offset=False)
result[name] = value
return result
def read_uic_tag(fh, tagid, plane_count, offset):
"""Read a single UIC tag value from file and return tag name and value.
UIC1Tags use an offset.
"""
def read_int(count=1):
value = struct.unpack('<%iI' % count, fh.read(4*count))
return value[0] if count == 1 else value
try:
name, dtype = UIC_TAGS[tagid]
except KeyError:
# unknown tag
return '_tagid_%i' % tagid, read_int()
if offset:
pos = fh.tell()
if dtype not in (int, None):
off = read_int()
if off < 8:
warnings.warn("invalid offset for uic tag '%s': %i"
% (name, off))
return name, off
fh.seek(off)
if dtype is None:
# skip
name = '_' + name
value = read_int()
elif dtype is int:
# int
value = read_int()
elif dtype is Fraction:
# fraction
value = read_int(2)
value = value[0] / value[1]
elif dtype is julian_datetime:
# datetime
value = julian_datetime(*read_int(2))
elif dtype is read_uic_image_property:
# ImagePropertyEx
value = read_uic_image_property(fh)
elif dtype is str:
# pascal string
size = read_int()
if 0 <= size < 2**10:
value = struct.unpack('%is' % size, fh.read(size))[0][:-1]
value = stripnull(value)
elif offset:
value = ''
warnings.warn("corrupt string in uic tag '%s'" % name)
else:
raise ValueError("invalid string size %i" % size)
elif dtype == '%ip':
# sequence of pascal strings
value = []
for i in range(plane_count):
size = read_int()
if 0 <= size < 2**10:
string = struct.unpack('%is' % size, fh.read(size))[0][:-1]
string = stripnull(string)
value.append(string)
elif offset:
warnings.warn("corrupt string in uic tag '%s'" % name)
else:
raise ValueError("invalid string size %i" % size)
else:
# struct or numpy type
dtype = '<' + dtype
if '%i' in dtype:
dtype = dtype % plane_count
if '(' in dtype:
# numpy type
value = fh.read_array(dtype, 1)[0]
if value.shape[-1] == 2:
# assume fractions
value = value[..., 0] / value[..., 1]
else:
# struct format
value = struct.unpack(dtype, fh.read(struct.calcsize(dtype)))
if len(value) == 1:
value = value[0]
if offset:
fh.seek(pos + 4)
return name, value
def read_uic_image_property(fh):
"""Read UIC ImagePropertyEx tag from file and return as dict."""
# TODO: test this
size = struct.unpack('B', fh.read(1))[0]
name = struct.unpack('%is' % size, fh.read(size))[0][:-1]
flags, prop = struct.unpack('<IB', fh.read(5))
if prop == 1:
value = struct.unpack('II', fh.read(8))
value = value[0] / value[1]
else:
size = struct.unpack('B', fh.read(1))[0]
value = struct.unpack('%is' % size, fh.read(size))[0]
return dict(name=name, flags=flags, value=value)
def read_cz_lsm_info(fh, byteorder, dtype, count):
"""Read CS_LSM_INFO tag from file and return as numpy.rec.array."""
assert byteorder == '<'
magic_number, structure_size = struct.unpack('<II', fh.read(8))
if magic_number not in (50350412, 67127628):
raise ValueError("not a valid CS_LSM_INFO structure")
fh.seek(-8, 1)
if structure_size < numpy.dtype(CZ_LSM_INFO).itemsize:
# adjust structure according to structure_size
cz_lsm_info = []
size = 0
for name, dtype in CZ_LSM_INFO:
size += numpy.dtype(dtype).itemsize
if size > structure_size:
break
cz_lsm_info.append((name, dtype))
else:
cz_lsm_info = CZ_LSM_INFO
return fh.read_record(cz_lsm_info, byteorder=byteorder)
def read_cz_lsm_floatpairs(fh):
"""Read LSM sequence of float pairs from file and return as list."""
size = struct.unpack('<i', fh.read(4))[0]
return fh.read_array('<2f8', count=size)
def read_cz_lsm_positions(fh):
"""Read LSM positions from file and return as list."""
size = struct.unpack('<I', fh.read(4))[0]
return fh.read_array('<2f8', count=size)
def read_cz_lsm_time_stamps(fh):
"""Read LSM time stamps from file and return as list."""
size, count = struct.unpack('<ii', fh.read(8))
if size != (8 + 8 * count):
raise ValueError("lsm_time_stamps block is too short")
# return struct.unpack('<%dd' % count, fh.read(8*count))
return fh.read_array('<f8', count=count)
def read_cz_lsm_event_list(fh):
"""Read LSM events from file and return as list of (time, type, text)."""
count = struct.unpack('<II', fh.read(8))[1]
events = []
while count > 0:
esize, etime, etype = struct.unpack('<IdI', fh.read(16))
etext = stripnull(fh.read(esize - 16))
events.append((etime, etype, etext))
count -= 1
return events
def read_cz_lsm_scan_info(fh):
"""Read LSM scan information from file and return as Record."""
block = Record()
blocks = [block]
unpack = struct.unpack
if 0x10000000 != struct.unpack('<I', fh.read(4))[0]:
# not a Recording sub block
raise ValueError("not a lsm_scan_info structure")
fh.read(8)
while True:
entry, dtype, size = unpack('<III', fh.read(12))
if dtype == 2:
# ascii
value = stripnull(fh.read(size))
elif dtype == 4:
# long
value = unpack('<i', fh.read(4))[0]
elif dtype == 5:
# rational
value = unpack('<d', fh.read(8))[0]
else:
value = 0
if entry in CZ_LSM_SCAN_INFO_ARRAYS:
blocks.append(block)
name = CZ_LSM_SCAN_INFO_ARRAYS[entry]
newobj = []
setattr(block, name, newobj)
block = newobj
elif entry in CZ_LSM_SCAN_INFO_STRUCTS:
blocks.append(block)
newobj = Record()
block.append(newobj)
block = newobj
elif entry in CZ_LSM_SCAN_INFO_ATTRIBUTES:
name = CZ_LSM_SCAN_INFO_ATTRIBUTES[entry]
setattr(block, name, value)
elif entry == 0xffffffff:
# end sub block
block = blocks.pop()
else:
# unknown entry
setattr(block, "entry_0x%x" % entry, value)
if not blocks:
break
return block
def read_nih_image_header(fh, byteorder, dtype, count):
"""Read NIH_IMAGE_HEADER tag from file and return as numpy.rec.array."""
a = fh.read_record(NIH_IMAGE_HEADER, byteorder=byteorder)
a = a.newbyteorder(byteorder)
a.xunit = a.xunit[:a._xunit_len]
a.um = a.um[:a._um_len]
return a
def read_micromanager_metadata(fh):
"""Read MicroManager non-TIFF settings from open file and return as dict.
The settings can be used to read image data without parsing the TIFF file.
Raise ValueError if file does not contain valid MicroManager metadata.
"""
fh.seek(0)
try:
byteorder = {b'II': '<', b'MM': '>'}[fh.read(2)]
except (IndexError, KeyError):
raise ValueError("not a MicroManager TIFF file")
results = {}
fh.seek(8)
(index_header, index_offset, display_header, display_offset,
comments_header, comments_offset, summary_header, summary_length
) = struct.unpack(byteorder + "IIIIIIII", fh.read(32))
if summary_header != 2355492:
raise ValueError("invalid MicroManager summary_header")
results['summary'] = read_json(fh, byteorder, None, summary_length)
if index_header != 54773648:
raise ValueError("invalid MicroManager index_header")
fh.seek(index_offset)
header, count = struct.unpack(byteorder + "II", fh.read(8))
if header != 3453623:
raise ValueError("invalid MicroManager index_header")
data = struct.unpack(byteorder + "IIIII"*count, fh.read(20*count))
results['index_map'] = {
'channel': data[::5], 'slice': data[1::5], 'frame': data[2::5],
'position': data[3::5], 'offset': data[4::5]}
if display_header != 483765892:
raise ValueError("invalid MicroManager display_header")
fh.seek(display_offset)
header, count = struct.unpack(byteorder + "II", fh.read(8))
if header != 347834724:
raise ValueError("invalid MicroManager display_header")
results['display_settings'] = read_json(fh, byteorder, None, count)
if comments_header != 99384722:
raise ValueError("invalid MicroManager comments_header")
fh.seek(comments_offset)
header, count = struct.unpack(byteorder + "II", fh.read(8))
if header != 84720485:
raise ValueError("invalid MicroManager comments_header")
results['comments'] = read_json(fh, byteorder, None, count)
return results
def imagej_metadata(data, bytecounts, byteorder):
"""Return dict from ImageJ metadata tag value."""
_str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252')
def read_string(data, byteorder):
return _str(stripnull(data[0 if byteorder == '<' else 1::2]))
def read_double(data, byteorder):
return struct.unpack(byteorder+('d' * (len(data) // 8)), data)
def read_bytes(data, byteorder):
#return struct.unpack('b' * len(data), data)
return numpy.fromstring(data, 'uint8')
metadata_types = { # big endian
b'info': ('info', read_string),
b'labl': ('labels', read_string),
b'rang': ('ranges', read_double),
b'luts': ('luts', read_bytes),
b'roi ': ('roi', read_bytes),
b'over': ('overlays', read_bytes)}
metadata_types.update( # little endian
dict((k[::-1], v) for k, v in metadata_types.items()))
if not bytecounts:
raise ValueError("no ImageJ metadata")
if not data[:4] in (b'IJIJ', b'JIJI'):
raise ValueError("invalid ImageJ metadata")
header_size = bytecounts[0]
if header_size < 12 or header_size > 804:
raise ValueError("invalid ImageJ metadata header size")
ntypes = (header_size - 4) // 8
header = struct.unpack(byteorder+'4sI'*ntypes, data[4:4+ntypes*8])
pos = 4 + ntypes * 8
counter = 0
result = {}
for mtype, count in zip(header[::2], header[1::2]):
values = []
name, func = metadata_types.get(mtype, (_str(mtype), read_bytes))
for _ in range(count):
counter += 1
pos1 = pos + bytecounts[counter]
values.append(func(data[pos:pos1], byteorder))
pos = pos1
result[name.strip()] = values[0] if count == 1 else values
return result
def imagej_description(description):
"""Return dict from ImageJ image_description tag."""
def _bool(val):
return {b'true': True, b'false': False}[val.lower()]
_str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252')
result = {}
for line in description.splitlines():
try:
key, val = line.split(b'=')
except Exception:
continue
key = key.strip()
val = val.strip()
for dtype in (int, float, _bool, _str):
try:
val = dtype(val)
break
except Exception:
pass
result[_str(key)] = val
return result
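# A minimal usage sketch (assuming Python 3; the description is a byte string
# as stored in the image_description tag):
#
#   imagej_description(b'ImageJ=1.11a\nimages=510\nhyperstack=true\n')
#   # {'ImageJ': '1.11a', 'images': 510, 'hyperstack': True}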
def _replace_by(module_function, package=None, warn=False):
"""Try replace decorated function by module.function."""
try:
from importlib import import_module
except ImportError:
warnings.warn('could not import module importlib')
return lambda func: func
def decorate(func, module_function=module_function, warn=warn):
try:
module, function = module_function.split('.')
if not package:
module = import_module(module)
else:
module = import_module('.' + module, package=package)
func, oldfunc = getattr(module, function), func
globals()['__old_' + func.__name__] = oldfunc
except Exception:
if warn:
warnings.warn("failed to import %s" % module_function)
return func
return decorate
def decodejpg(encoded, tables=b'', photometric=None,
ycbcr_subsampling=None, ycbcr_positioning=None):
"""Decode JPEG encoded byte string (using _czifile extension module)."""
import _czifile
image = _czifile.decodejpg(encoded, tables)
if photometric == 'rgb' and ycbcr_subsampling and ycbcr_positioning:
# TODO: convert YCbCr to RGB
pass
return image.tostring()
@_replace_by('_tifffile.decodepackbits')
def decodepackbits(encoded):
"""Decompress PackBits encoded byte string.
PackBits is a simple byte-oriented run-length compression scheme.
"""
func = ord if sys.version[0] == '2' else lambda x: x
result = []
result_extend = result.extend
i = 0
try:
while True:
n = func(encoded[i]) + 1
i += 1
if n < 129:
result_extend(encoded[i:i+n])
i += n
elif n > 129:
result_extend(encoded[i:i+1] * (258-n))
i += 1
except IndexError:
pass
return b''.join(result) if sys.version[0] == '2' else bytes(result)
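# A usage sketch decoding the PackBits example stream from the TIFF 6.0
# specification; the result is 24 bytes of unpacked data.
#
#   encoded = b'\xfe\xaa\x02\x80\x00\x2a\xfd\xaa\x03\x80\x00\x2a\x22\xf7\xaa'
#   decodepackbits(encoded)
#   # b'\xaa\xaa\xaa\x80\x00\x2a' + b'\xaa'*4 + b'\x80\x00\x2a\x22' + b'\xaa'*10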
@_replace_by('_tifffile.decodelzw')
def decodelzw(encoded):
"""Decompress LZW (Lempel-Ziv-Welch) encoded TIFF strip (byte string).
The strip must begin with a CLEAR code and end with an EOI code.
This is an implementation of the LZW decoding algorithm described in (1).
It is not compatible with old style LZW compressed files like quad-lzw.tif.
"""
len_encoded = len(encoded)
bitcount_max = len_encoded * 8
unpack = struct.unpack
if sys.version[0] == '2':
newtable = [chr(i) for i in range(256)]
else:
newtable = [bytes([i]) for i in range(256)]
newtable.extend((0, 0))
def next_code():
"""Return integer of `bitw` bits at `bitcount` position in encoded."""
start = bitcount // 8
s = encoded[start:start+4]
try:
code = unpack('>I', s)[0]
except Exception:
code = unpack('>I', s + b'\x00'*(4-len(s)))[0]
code <<= bitcount % 8
code &= mask
return code >> shr
switchbitch = { # code: bit-width, shr-bits, bit-mask
255: (9, 23, int(9*'1'+'0'*23, 2)),
511: (10, 22, int(10*'1'+'0'*22, 2)),
1023: (11, 21, int(11*'1'+'0'*21, 2)),
2047: (12, 20, int(12*'1'+'0'*20, 2)), }
bitw, shr, mask = switchbitch[255]
bitcount = 0
if len_encoded < 4:
raise ValueError("strip must be at least 4 characters long")
if next_code() != 256:
raise ValueError("strip must begin with CLEAR code")
code = 0
oldcode = 0
result = []
result_append = result.append
while True:
code = next_code() # ~5% faster when inlining this function
bitcount += bitw
if code == 257 or bitcount >= bitcount_max: # EOI
break
if code == 256: # CLEAR
table = newtable[:]
table_append = table.append
lentable = 258
bitw, shr, mask = switchbitch[255]
code = next_code()
bitcount += bitw
if code == 257: # EOI
break
result_append(table[code])
else:
if code < lentable:
decoded = table[code]
newcode = table[oldcode] + decoded[:1]
else:
newcode = table[oldcode]
newcode += newcode[:1]
decoded = newcode
result_append(decoded)
table_append(newcode)
lentable += 1
oldcode = code
if lentable in switchbitch:
bitw, shr, mask = switchbitch[lentable]
if code != 257:
warnings.warn("unexpected end of lzw stream (code %i)" % code)
return b''.join(result)
@_replace_by('_tifffile.unpackints')
def unpackints(data, dtype, itemsize, runlen=0):
"""Decompress byte string to array of integers of any bit size <= 32.
Parameters
----------
data : byte str
Data to decompress.
dtype : numpy.dtype or str
A numpy boolean or integer type.
itemsize : int
Number of bits per integer.
runlen : int
Number of consecutive integers, after which to start at next byte.
"""
if itemsize == 1: # bitarray
data = numpy.fromstring(data, '|B')
data = numpy.unpackbits(data)
if runlen % 8:
data = data.reshape(-1, runlen + (8 - runlen % 8))
data = data[:, :runlen].reshape(-1)
return data.astype(dtype)
dtype = numpy.dtype(dtype)
if itemsize in (8, 16, 32, 64):
return numpy.fromstring(data, dtype)
if itemsize < 1 or itemsize > 32:
raise ValueError("itemsize out of range: %i" % itemsize)
if dtype.kind not in "biu":
raise ValueError("invalid dtype")
itembytes = next(i for i in (1, 2, 4, 8) if 8 * i >= itemsize)
if itembytes != dtype.itemsize:
raise ValueError("dtype.itemsize too small")
if runlen == 0:
runlen = len(data) // itembytes
skipbits = runlen*itemsize % 8
if skipbits:
skipbits = 8 - skipbits
shrbits = itembytes*8 - itemsize
bitmask = int(itemsize*'1'+'0'*shrbits, 2)
dtypestr = '>' + dtype.char # dtype always big endian?
unpack = struct.unpack
l = runlen * (len(data)*8 // (runlen*itemsize + skipbits))
result = numpy.empty((l, ), dtype)
bitcount = 0
for i in range(len(result)):
start = bitcount // 8
s = data[start:start+itembytes]
try:
code = unpack(dtypestr, s)[0]
except Exception:
code = unpack(dtypestr, s + b'\x00'*(itembytes-len(s)))[0]
code <<= bitcount % 8
code &= bitmask
result[i] = code >> shrbits
bitcount += itemsize
if (i+1) % runlen == 0:
bitcount += skipbits
return result
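# A minimal usage sketch: two bytes of packed 4-bit samples unpack into four
# uint8 values.
#
#   unpackints(b'\x12\x34', 'uint8', 4)
#   # array([1, 2, 3, 4], dtype=uint8)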
def unpackrgb(data, dtype='<B', bitspersample=(5, 6, 5), rescale=True):
"""Return array from byte string containing packed samples.
Use to unpack RGB565 or RGB555 to RGB888 format.
Parameters
----------
data : byte str
The data to be decoded. Samples in each pixel are stored consecutively.
Pixels are aligned to 8, 16, or 32 bit boundaries.
dtype : numpy.dtype
The sample data type. The byteorder applies also to the data stream.
bitspersample : tuple
Number of bits for each sample in a pixel.
rescale : bool
Upscale samples to the number of bits in dtype.
Returns
-------
result : ndarray
Flattened array of unpacked samples of native dtype.
Examples
--------
>>> data = struct.pack('BBBB', 0x21, 0x08, 0xff, 0xff)
>>> print(unpackrgb(data, '<B', (5, 6, 5), False))
[ 1 1 1 31 63 31]
>>> print(unpackrgb(data, '<B', (5, 6, 5)))
[ 8 4 8 255 255 255]
>>> print(unpackrgb(data, '<B', (5, 5, 5)))
[ 16 8 8 255 255 255]
"""
dtype = numpy.dtype(dtype)
bits = int(numpy.sum(bitspersample))
if not (bits <= 32 and all(i <= dtype.itemsize*8 for i in bitspersample)):
raise ValueError("sample size not supported %s" % str(bitspersample))
dt = next(i for i in 'BHI' if numpy.dtype(i).itemsize*8 >= bits)
data = numpy.fromstring(data, dtype.byteorder+dt)
result = numpy.empty((data.size, len(bitspersample)), dtype.char)
for i, bps in enumerate(bitspersample):
t = data >> int(numpy.sum(bitspersample[i+1:]))
t &= int('0b'+'1'*bps, 2)
if rescale:
o = ((dtype.itemsize * 8) // bps + 1) * bps
if o > data.dtype.itemsize * 8:
t = t.astype('I')
t *= (2**o - 1) // (2**bps - 1)
t //= 2**(o - (dtype.itemsize * 8))
result[:, i] = t
return result.reshape(-1)
def reorient(image, orientation):
"""Return reoriented view of image array.
Parameters
----------
image : numpy array
Non-squeezed output of asarray() functions.
Axes -3 and -2 must be image length and width respectively.
orientation : int or str
One of TIFF_ORIENTATIONS keys or values.
"""
o = TIFF_ORIENTATIONS.get(orientation, orientation)
if o == 'top_left':
return image
elif o == 'top_right':
return image[..., ::-1, :]
elif o == 'bottom_left':
return image[..., ::-1, :, :]
elif o == 'bottom_right':
return image[..., ::-1, ::-1, :]
elif o == 'left_top':
return numpy.swapaxes(image, -3, -2)
elif o == 'right_top':
return numpy.swapaxes(image, -3, -2)[..., ::-1, :]
elif o == 'left_bottom':
return numpy.swapaxes(image, -3, -2)[..., ::-1, :, :]
elif o == 'right_bottom':
return numpy.swapaxes(image, -3, -2)[..., ::-1, ::-1, :]
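# A minimal usage sketch: flip a (length, width, samples) image horizontally.
#
#   img = numpy.zeros((4, 6, 3), 'uint8')
#   reorient(img, 'top_right').shape   # (4, 6, 3), with columns reversed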
def squeeze_axes(shape, axes, skip='XY'):
"""Return shape and axes with single-dimensional entries removed.
Remove unused dimensions unless their axes are listed in 'skip'.
>>> squeeze_axes((5, 1, 2, 1, 1), 'TZYXC')
((5, 2, 1), 'TYX')
"""
if len(shape) != len(axes):
raise ValueError("dimensions of axes and shape don't match")
shape, axes = zip(*(i for i in zip(shape, axes)
if i[0] > 1 or i[1] in skip))
return shape, ''.join(axes)
def transpose_axes(data, axes, asaxes='CTZYX'):
"""Return data with its axes permuted to match specified axes.
A view is returned if possible.
>>> transpose_axes(numpy.zeros((2, 3, 4, 5)), 'TYXC', asaxes='CTZYX').shape
(5, 2, 1, 3, 4)
"""
for ax in axes:
if ax not in asaxes:
raise ValueError("unknown axis %s" % ax)
# add missing axes to data
shape = data.shape
for ax in reversed(asaxes):
if ax not in axes:
axes = ax + axes
shape = (1,) + shape
data = data.reshape(shape)
# transpose axes
data = data.transpose([axes.index(ax) for ax in asaxes])
return data
def stack_pages(pages, memmap=False, *args, **kwargs):
"""Read data from sequence of TiffPage and stack them vertically.
If memmap is True, return an array stored in a binary file on disk.
Additional parameters are passed to the page asarray function.
"""
if len(pages) == 0:
raise ValueError("no pages")
if len(pages) == 1:
return pages[0].asarray(memmap=memmap, *args, **kwargs)
result = pages[0].asarray(*args, **kwargs)
shape = (len(pages),) + result.shape
if memmap:
with tempfile.NamedTemporaryFile() as fh:
result = numpy.memmap(fh, dtype=result.dtype, shape=shape)
else:
result = numpy.empty(shape, dtype=result.dtype)
for i, page in enumerate(pages):
result[i] = page.asarray(*args, **kwargs)
return result
def stripnull(string):
"""Return string truncated at first null character.
Clean NULL terminated C strings.
>>> stripnull(b'string\\x00')
b'string'
"""
i = string.find(b'\x00')
return string if (i < 0) else string[:i]
def stripascii(string):
"""Return string truncated at last byte that is 7bit ASCII.
Clean NULL separated and terminated TIFF strings.
>>> stripascii(b'string\\x00string\\n\\x01\\x00')
b'string\\x00string\\n'
>>> stripascii(b'\\x00')
b''
"""
# TODO: pythonize this
ord_ = ord if sys.version_info[0] < 3 else lambda x: x
i = len(string)
while i:
i -= 1
if 8 < ord_(string[i]) < 127:
break
else:
i = -1
return string[:i+1]
def format_size(size):
"""Return file size as string from byte size."""
for unit in ('B', 'KB', 'MB', 'GB', 'TB'):
if size < 2048:
return "%.f %s" % (size, unit)
size /= 1024.0
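# A minimal usage sketch:
#
#   format_size(256)       # '256 B'
#   format_size(1234567)   # '1206 KB'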
def sequence(value):
"""Return tuple containing value if value is not a sequence.
>>> sequence(1)
(1,)
>>> sequence([1])
[1]
"""
try:
len(value)
return value
except TypeError:
return (value, )
def product(iterable):
"""Return product of sequence of numbers.
Equivalent of functools.reduce(operator.mul, iterable, 1).
>>> product([2**8, 2**30])
274877906944
>>> product([])
1
"""
prod = 1
for i in iterable:
prod *= i
return prod
def natural_sorted(iterable):
"""Return human sorted list of strings.
E.g. for sorting file names.
>>> natural_sorted(['f1', 'f2', 'f10'])
['f1', 'f2', 'f10']
"""
def sortkey(x):
return [(int(c) if c.isdigit() else c) for c in re.split(numbers, x)]
numbers = re.compile(r'(\d+)')
return sorted(iterable, key=sortkey)
def excel_datetime(timestamp, epoch=datetime.datetime.fromordinal(693594)):
"""Return datetime object from timestamp in Excel serial format.
Convert LSM time stamps.
>>> excel_datetime(40237.029999999795)
datetime.datetime(2010, 2, 28, 0, 43, 11, 999982)
"""
return epoch + datetime.timedelta(timestamp)
def julian_datetime(julianday, milisecond=0):
"""Return datetime from days since 1/1/4713 BC and ms since midnight.
Convert Julian dates according to MetaMorph.
>>> julian_datetime(2451576, 54362783)
datetime.datetime(2000, 2, 2, 15, 6, 2, 783)
"""
if julianday <= 1721423:
# no datetime before year 1
return None
a = julianday + 1
if a > 2299160:
alpha = math.trunc((a - 1867216.25) / 36524.25)
a += 1 + alpha - alpha // 4
b = a + (1524 if a > 1721423 else 1158)
c = math.trunc((b - 122.1) / 365.25)
d = math.trunc(365.25 * c)
e = math.trunc((b - d) / 30.6001)
day = b - d - math.trunc(30.6001 * e)
month = e - (1 if e < 13.5 else 13)
year = c - (4716 if month > 2.5 else 4715)
hour, milisecond = divmod(milisecond, 1000 * 60 * 60)
minute, milisecond = divmod(milisecond, 1000 * 60)
second, milisecond = divmod(milisecond, 1000)
return datetime.datetime(year, month, day,
hour, minute, second, milisecond)
def test_tifffile(directory='testimages', verbose=True):
"""Read all images in directory.
Print error message on failure.
>>> test_tifffile(verbose=False)
"""
successful = 0
failed = 0
start = time.time()
for f in glob.glob(os.path.join(directory, '*.*')):
if verbose:
print("\n%s>\n" % f.lower(), end='')
t0 = time.time()
try:
tif = TiffFile(f, multifile=True)
except Exception as e:
if not verbose:
print(f, end=' ')
print("ERROR:", e)
failed += 1
continue
try:
img = tif.asarray()
except ValueError:
try:
img = tif[0].asarray()
except Exception as e:
if not verbose:
print(f, end=' ')
print("ERROR:", e)
failed += 1
continue
finally:
tif.close()
successful += 1
if verbose:
print("%s, %s %s, %s, %.0f ms" % (
str(tif), str(img.shape), img.dtype, tif[0].compression,
(time.time()-t0) * 1e3))
if verbose:
print("\nSuccessfully read %i of %i files in %.3f s\n" % (
successful, successful+failed, time.time()-start))
class TIFF_SUBFILE_TYPES(object):
def __getitem__(self, key):
result = []
if key & 1:
result.append('reduced_image')
if key & 2:
result.append('page')
if key & 4:
result.append('mask')
return tuple(result)
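# A minimal usage sketch: the 'new_subfile_type' tag value is a bit mask that
# this lookup object decodes into a tuple of flag names.
#
#   TIFF_SUBFILE_TYPES()[5]   # ('reduced_image', 'mask')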
TIFF_PHOTOMETRICS = {
0: 'miniswhite',
1: 'minisblack',
2: 'rgb',
3: 'palette',
4: 'mask',
5: 'separated', # CMYK
6: 'ycbcr',
8: 'cielab',
9: 'icclab',
10: 'itulab',
32803: 'cfa', # Color Filter Array
32844: 'logl',
32845: 'logluv',
34892: 'linear_raw'
}
TIFF_COMPESSIONS = {
1: None,
2: 'ccittrle',
3: 'ccittfax3',
4: 'ccittfax4',
5: 'lzw',
6: 'ojpeg',
7: 'jpeg',
8: 'adobe_deflate',
9: 't85',
10: 't43',
32766: 'next',
32771: 'ccittrlew',
32773: 'packbits',
32809: 'thunderscan',
32895: 'it8ctpad',
32896: 'it8lw',
32897: 'it8mp',
32898: 'it8bl',
32908: 'pixarfilm',
32909: 'pixarlog',
32946: 'deflate',
32947: 'dcs',
34661: 'jbig',
34676: 'sgilog',
34677: 'sgilog24',
34712: 'jp2000',
34713: 'nef',
}
TIFF_DECOMPESSORS = {
None: lambda x: x,
'adobe_deflate': zlib.decompress,
'deflate': zlib.decompress,
'packbits': decodepackbits,
'lzw': decodelzw,
# 'jpeg': decodejpg
}
TIFF_DATA_TYPES = {
1: '1B', # BYTE 8-bit unsigned integer.
2: '1s', # ASCII 8-bit byte that contains a 7-bit ASCII code;
# the last byte must be NULL (binary zero).
3: '1H', # SHORT 16-bit (2-byte) unsigned integer
4: '1I', # LONG 32-bit (4-byte) unsigned integer.
5: '2I', # RATIONAL Two LONGs: the first represents the numerator of
# a fraction; the second, the denominator.
6: '1b', # SBYTE An 8-bit signed (twos-complement) integer.
7: '1s', # UNDEFINED An 8-bit byte that may contain anything,
# depending on the definition of the field.
8: '1h', # SSHORT A 16-bit (2-byte) signed (twos-complement) integer.
9: '1i', # SLONG A 32-bit (4-byte) signed (twos-complement) integer.
10: '2i', # SRATIONAL Two SLONGs: the first represents the numerator
# of a fraction, the second the denominator.
11: '1f', # FLOAT Single precision (4-byte) IEEE format.
12: '1d', # DOUBLE Double precision (8-byte) IEEE format.
13: '1I', # IFD unsigned 4 byte IFD offset.
#14: '', # UNICODE
#15: '', # COMPLEX
16: '1Q', # LONG8 unsigned 8 byte integer (BigTiff)
17: '1q', # SLONG8 signed 8 byte integer (BigTiff)
18: '1Q', # IFD8 unsigned 8 byte IFD offset (BigTiff)
}
TIFF_SAMPLE_FORMATS = {
1: 'uint',
2: 'int',
3: 'float',
#4: 'void',
#5: 'complex_int',
6: 'complex',
}
TIFF_SAMPLE_DTYPES = {
('uint', 1): '?', # bitmap
('uint', 2): 'B',
('uint', 3): 'B',
('uint', 4): 'B',
('uint', 5): 'B',
('uint', 6): 'B',
('uint', 7): 'B',
('uint', 8): 'B',
('uint', 9): 'H',
('uint', 10): 'H',
('uint', 11): 'H',
('uint', 12): 'H',
('uint', 13): 'H',
('uint', 14): 'H',
('uint', 15): 'H',
('uint', 16): 'H',
('uint', 17): 'I',
('uint', 18): 'I',
('uint', 19): 'I',
('uint', 20): 'I',
('uint', 21): 'I',
('uint', 22): 'I',
('uint', 23): 'I',
('uint', 24): 'I',
('uint', 25): 'I',
('uint', 26): 'I',
('uint', 27): 'I',
('uint', 28): 'I',
('uint', 29): 'I',
('uint', 30): 'I',
('uint', 31): 'I',
('uint', 32): 'I',
('uint', 64): 'Q',
('int', 8): 'b',
('int', 16): 'h',
('int', 32): 'i',
('int', 64): 'q',
('float', 16): 'e',
('float', 32): 'f',
('float', 64): 'd',
('complex', 64): 'F',
('complex', 128): 'D',
('uint', (5, 6, 5)): 'B',
}
TIFF_ORIENTATIONS = {
1: 'top_left',
2: 'top_right',
3: 'bottom_right',
4: 'bottom_left',
5: 'left_top',
6: 'right_top',
7: 'right_bottom',
8: 'left_bottom',
}
# TODO: is there a standard for character axes labels?
AXES_LABELS = {
'X': 'width',
'Y': 'height',
'Z': 'depth',
'S': 'sample', # rgb(a)
'I': 'series', # general sequence, plane, page, IFD
'T': 'time',
'C': 'channel', # color, emission wavelength
'A': 'angle',
'P': 'phase', # formerly F # P is Position in LSM!
'R': 'tile', # region, point, mosaic
'H': 'lifetime', # histogram
'E': 'lambda', # excitation wavelength
'L': 'exposure', # lux
'V': 'event',
'Q': 'other',
#'M': 'mosaic', # LSM 6
}
AXES_LABELS.update(dict((v, k) for k, v in AXES_LABELS.items()))
# Map OME pixel types to numpy dtype
OME_PIXEL_TYPES = {
'int8': 'i1',
'int16': 'i2',
'int32': 'i4',
'uint8': 'u1',
'uint16': 'u2',
'uint32': 'u4',
'float': 'f4',
# 'bit': 'bit',
'double': 'f8',
'complex': 'c8',
'double-complex': 'c16',
}
# NIH Image PicHeader v1.63
NIH_IMAGE_HEADER = [
('fileid', 'a8'),
('nlines', 'i2'),
('pixelsperline', 'i2'),
('version', 'i2'),
('oldlutmode', 'i2'),
('oldncolors', 'i2'),
('colors', 'u1', (3, 32)),
('oldcolorstart', 'i2'),
('colorwidth', 'i2'),
('extracolors', 'u2', (6, 3)),
('nextracolors', 'i2'),
('foregroundindex', 'i2'),
('backgroundindex', 'i2'),
('xscale', 'f8'),
('_x0', 'i2'),
('_x1', 'i2'),
('units_t', 'i2'), # NIH_UNITS_TYPE
('p1', [('x', 'i2'), ('y', 'i2')]),
('p2', [('x', 'i2'), ('y', 'i2')]),
('curvefit_t', 'i2'), # NIH_CURVEFIT_TYPE
('ncoefficients', 'i2'),
('coeff', 'f8', 6),
('_um_len', 'u1'),
('um', 'a15'),
('_x2', 'u1'),
('binarypic', 'b1'),
('slicestart', 'i2'),
('sliceend', 'i2'),
('scalemagnification', 'f4'),
('nslices', 'i2'),
('slicespacing', 'f4'),
('currentslice', 'i2'),
('frameinterval', 'f4'),
('pixelaspectratio', 'f4'),
('colorstart', 'i2'),
('colorend', 'i2'),
('ncolors', 'i2'),
('fill1', '3u2'),
('fill2', '3u2'),
('colortable_t', 'u1'), # NIH_COLORTABLE_TYPE
('lutmode_t', 'u1'), # NIH_LUTMODE_TYPE
('invertedtable', 'b1'),
('zeroclip', 'b1'),
('_xunit_len', 'u1'),
('xunit', 'a11'),
('stacktype_t', 'i2'), # NIH_STACKTYPE_TYPE
]
NIH_COLORTABLE_TYPE = (
'CustomTable', 'AppleDefault', 'Pseudo20', 'Pseudo32', 'Rainbow',
'Fire1', 'Fire2', 'Ice', 'Grays', 'Spectrum')
NIH_LUTMODE_TYPE = (
'PseudoColor', 'OldAppleDefault', 'OldSpectrum', 'GrayScale',
'ColorLut', 'CustomGrayscale')
NIH_CURVEFIT_TYPE = (
'StraightLine', 'Poly2', 'Poly3', 'Poly4', 'Poly5', 'ExpoFit',
'PowerFit', 'LogFit', 'RodbardFit', 'SpareFit1', 'Uncalibrated',
'UncalibratedOD')
NIH_UNITS_TYPE = (
'Nanometers', 'Micrometers', 'Millimeters', 'Centimeters', 'Meters',
'Kilometers', 'Inches', 'Feet', 'Miles', 'Pixels', 'OtherUnits')
NIH_STACKTYPE_TYPE = (
'VolumeStack', 'RGBStack', 'MovieStack', 'HSVStack')
# Map Universal Imaging Corporation MetaMorph internal tag ids to name and type
UIC_TAGS = {
0: ('auto_scale', int),
1: ('min_scale', int),
2: ('max_scale', int),
3: ('spatial_calibration', int),
4: ('x_calibration', Fraction),
5: ('y_calibration', Fraction),
6: ('calibration_units', str),
7: ('name', str),
8: ('thresh_state', int),
9: ('thresh_state_red', int),
10: ('tagid_10', None), # undefined
11: ('thresh_state_green', int),
12: ('thresh_state_blue', int),
13: ('thresh_state_lo', int),
14: ('thresh_state_hi', int),
15: ('zoom', int),
16: ('create_time', julian_datetime),
17: ('last_saved_time', julian_datetime),
18: ('current_buffer', int),
19: ('gray_fit', None),
20: ('gray_point_count', None),
21: ('gray_x', Fraction),
22: ('gray_y', Fraction),
23: ('gray_min', Fraction),
24: ('gray_max', Fraction),
25: ('gray_unit_name', str),
26: ('standard_lut', int),
27: ('wavelength', int),
28: ('stage_position', '(%i,2,2)u4'), # N xy positions as fractions
29: ('camera_chip_offset', '(%i,2,2)u4'), # N xy offsets as fractions
30: ('overlay_mask', None),
31: ('overlay_compress', None),
32: ('overlay', None),
33: ('special_overlay_mask', None),
34: ('special_overlay_compress', None),
35: ('special_overlay', None),
36: ('image_property', read_uic_image_property),
37: ('stage_label', '%ip'), # N str
38: ('autoscale_lo_info', Fraction),
39: ('autoscale_hi_info', Fraction),
40: ('absolute_z', '(%i,2)u4'), # N fractions
41: ('absolute_z_valid', '(%i,)u4'), # N long
42: ('gamma', int),
43: ('gamma_red', int),
44: ('gamma_green', int),
45: ('gamma_blue', int),
46: ('camera_bin', int),
47: ('new_lut', int),
48: ('image_property_ex', None),
49: ('plane_property', int),
50: ('user_lut_table', '(256,3)u1'),
51: ('red_autoscale_info', int),
52: ('red_autoscale_lo_info', Fraction),
53: ('red_autoscale_hi_info', Fraction),
54: ('red_minscale_info', int),
55: ('red_maxscale_info', int),
56: ('green_autoscale_info', int),
57: ('green_autoscale_lo_info', Fraction),
58: ('green_autoscale_hi_info', Fraction),
59: ('green_minscale_info', int),
60: ('green_maxscale_info', int),
61: ('blue_autoscale_info', int),
62: ('blue_autoscale_lo_info', Fraction),
63: ('blue_autoscale_hi_info', Fraction),
64: ('blue_min_scale_info', int),
65: ('blue_max_scale_info', int),
#66: ('overlay_plane_color', read_uic_overlay_plane_color),
}
# Olympus FluoView
MM_DIMENSION = [
('name', 'a16'),
('size', 'i4'),
('origin', 'f8'),
('resolution', 'f8'),
('unit', 'a64'),
]
MM_HEADER = [
('header_flag', 'i2'),
('image_type', 'u1'),
('image_name', 'a257'),
('offset_data', 'u4'),
('palette_size', 'i4'),
('offset_palette0', 'u4'),
('offset_palette1', 'u4'),
('comment_size', 'i4'),
('offset_comment', 'u4'),
('dimensions', MM_DIMENSION, 10),
('offset_position', 'u4'),
('map_type', 'i2'),
('map_min', 'f8'),
('map_max', 'f8'),
('min_value', 'f8'),
('max_value', 'f8'),
('offset_map', 'u4'),
('gamma', 'f8'),
('offset', 'f8'),
('gray_channel', MM_DIMENSION),
('offset_thumbnail', 'u4'),
('voice_field', 'i4'),
('offset_voice_field', 'u4'),
]
# Carl Zeiss LSM
CZ_LSM_INFO = [
('magic_number', 'u4'),
('structure_size', 'i4'),
('dimension_x', 'i4'),
('dimension_y', 'i4'),
('dimension_z', 'i4'),
('dimension_channels', 'i4'),
('dimension_time', 'i4'),
('data_type', 'i4'), # CZ_DATA_TYPES
('thumbnail_x', 'i4'),
('thumbnail_y', 'i4'),
('voxel_size_x', 'f8'),
('voxel_size_y', 'f8'),
('voxel_size_z', 'f8'),
('origin_x', 'f8'),
('origin_y', 'f8'),
('origin_z', 'f8'),
('scan_type', 'u2'),
('spectral_scan', 'u2'),
('type_of_data', 'u4'), # CZ_TYPE_OF_DATA
('offset_vector_overlay', 'u4'),
('offset_input_lut', 'u4'),
('offset_output_lut', 'u4'),
('offset_channel_colors', 'u4'),
('time_interval', 'f8'),
('offset_channel_data_types', 'u4'),
('offset_scan_info', 'u4'), # CZ_LSM_SCAN_INFO
('offset_ks_data', 'u4'),
('offset_time_stamps', 'u4'),
('offset_event_list', 'u4'),
('offset_roi', 'u4'),
('offset_bleach_roi', 'u4'),
('offset_next_recording', 'u4'),
# LSM 2.0 ends here
('display_aspect_x', 'f8'),
('display_aspect_y', 'f8'),
('display_aspect_z', 'f8'),
('display_aspect_time', 'f8'),
('offset_mean_of_roi_overlay', 'u4'),
('offset_topo_isoline_overlay', 'u4'),
('offset_topo_profile_overlay', 'u4'),
('offset_linescan_overlay', 'u4'),
('offset_toolbar_flags', 'u4'),
('offset_channel_wavelength', 'u4'),
('offset_channel_factors', 'u4'),
('objective_sphere_correction', 'f8'),
('offset_unmix_parameters', 'u4'),
# LSM 3.2, 4.0 end here
('offset_acquisition_parameters', 'u4'),
('offset_characteristics', 'u4'),
('offset_palette', 'u4'),
('time_difference_x', 'f8'),
('time_difference_y', 'f8'),
('time_difference_z', 'f8'),
('internal_use_1', 'u4'),
('dimension_p', 'i4'),
('dimension_m', 'i4'),
('dimensions_reserved', '16i4'),
('offset_tile_positions', 'u4'),
('reserved_1', '9u4'),
('offset_positions', 'u4'),
('reserved_2', '21u4'), # must be 0
]
# Import functions for LSM_INFO sub-records
CZ_LSM_INFO_READERS = {
'scan_info': read_cz_lsm_scan_info,
'time_stamps': read_cz_lsm_time_stamps,
'event_list': read_cz_lsm_event_list,
'channel_colors': read_cz_lsm_floatpairs,
'positions': read_cz_lsm_floatpairs,
'tile_positions': read_cz_lsm_floatpairs,
}
# Map cz_lsm_info.scan_type to dimension order
CZ_SCAN_TYPES = {
0: 'XYZCT', # x-y-z scan
1: 'XYZCT', # z scan (x-z plane)
2: 'XYZCT', # line scan
3: 'XYTCZ', # time series x-y
4: 'XYZTC', # time series x-z
5: 'XYTCZ', # time series 'Mean of ROIs'
6: 'XYZTC', # time series x-y-z
7: 'XYCTZ', # spline scan
8: 'XYCZT', # spline scan x-z
9: 'XYTCZ', # time series spline plane x-z
10: 'XYZCT', # point mode
}
# Map dimension codes to cz_lsm_info attribute
CZ_DIMENSIONS = {
'X': 'dimension_x',
'Y': 'dimension_y',
'Z': 'dimension_z',
'C': 'dimension_channels',
'T': 'dimension_time',
}
# Description of cz_lsm_info.data_type
CZ_DATA_TYPES = {
0: 'varying data types',
1: '8 bit unsigned integer',
2: '12 bit unsigned integer',
5: '32 bit float',
}
# Description of cz_lsm_info.type_of_data
CZ_TYPE_OF_DATA = {
0: 'Original scan data',
1: 'Calculated data',
2: '3D reconstruction',
3: 'Topography height map',
}
CZ_LSM_SCAN_INFO_ARRAYS = {
0x20000000: "tracks",
0x30000000: "lasers",
0x60000000: "detection_channels",
0x80000000: "illumination_channels",
0xa0000000: "beam_splitters",
0xc0000000: "data_channels",
0x11000000: "timers",
0x13000000: "markers",
}
CZ_LSM_SCAN_INFO_STRUCTS = {
# 0x10000000: "recording",
0x40000000: "track",
0x50000000: "laser",
0x70000000: "detection_channel",
0x90000000: "illumination_channel",
0xb0000000: "beam_splitter",
0xd0000000: "data_channel",
0x12000000: "timer",
0x14000000: "marker",
}
CZ_LSM_SCAN_INFO_ATTRIBUTES = {
# recording
0x10000001: "name",
0x10000002: "description",
0x10000003: "notes",
0x10000004: "objective",
0x10000005: "processing_summary",
0x10000006: "special_scan_mode",
0x10000007: "scan_type",
0x10000008: "scan_mode",
0x10000009: "number_of_stacks",
0x1000000a: "lines_per_plane",
0x1000000b: "samples_per_line",
0x1000000c: "planes_per_volume",
0x1000000d: "images_width",
0x1000000e: "images_height",
0x1000000f: "images_number_planes",
0x10000010: "images_number_stacks",
0x10000011: "images_number_channels",
0x10000012: "linscan_xy_size",
0x10000013: "scan_direction",
0x10000014: "time_series",
0x10000015: "original_scan_data",
0x10000016: "zoom_x",
0x10000017: "zoom_y",
0x10000018: "zoom_z",
0x10000019: "sample_0x",
0x1000001a: "sample_0y",
0x1000001b: "sample_0z",
0x1000001c: "sample_spacing",
0x1000001d: "line_spacing",
0x1000001e: "plane_spacing",
0x1000001f: "plane_width",
0x10000020: "plane_height",
0x10000021: "volume_depth",
0x10000023: "nutation",
0x10000034: "rotation",
0x10000035: "precession",
0x10000036: "sample_0time",
0x10000037: "start_scan_trigger_in",
0x10000038: "start_scan_trigger_out",
0x10000039: "start_scan_event",
0x10000040: "start_scan_time",
0x10000041: "stop_scan_trigger_in",
0x10000042: "stop_scan_trigger_out",
0x10000043: "stop_scan_event",
0x10000044: "stop_scan_time",
0x10000045: "use_rois",
0x10000046: "use_reduced_memory_rois",
0x10000047: "user",
0x10000048: "use_bc_correction",
0x10000049: "position_bc_correction1",
0x10000050: "position_bc_correction2",
0x10000051: "interpolation_y",
0x10000052: "camera_binning",
0x10000053: "camera_supersampling",
0x10000054: "camera_frame_width",
0x10000055: "camera_frame_height",
0x10000056: "camera_offset_x",
0x10000057: "camera_offset_y",
0x10000059: "rt_binning",
0x1000005a: "rt_frame_width",
0x1000005b: "rt_frame_height",
0x1000005c: "rt_region_width",
0x1000005d: "rt_region_height",
0x1000005e: "rt_offset_x",
0x1000005f: "rt_offset_y",
0x10000060: "rt_zoom",
0x10000061: "rt_line_period",
0x10000062: "prescan",
0x10000063: "scan_direction_z",
# track
0x40000001: "multiplex_type", # 0 after line; 1 after frame
0x40000002: "multiplex_order",
0x40000003: "sampling_mode", # 0 sample; 1 line average; 2 frame average
0x40000004: "sampling_method", # 1 mean; 2 sum
0x40000005: "sampling_number",
0x40000006: "acquire",
0x40000007: "sample_observation_time",
0x4000000b: "time_between_stacks",
0x4000000c: "name",
0x4000000d: "collimator1_name",
0x4000000e: "collimator1_position",
0x4000000f: "collimator2_name",
0x40000010: "collimator2_position",
0x40000011: "is_bleach_track",
0x40000012: "is_bleach_after_scan_number",
0x40000013: "bleach_scan_number",
0x40000014: "trigger_in",
0x40000015: "trigger_out",
0x40000016: "is_ratio_track",
0x40000017: "bleach_count",
0x40000018: "spi_center_wavelength",
0x40000019: "pixel_time",
0x40000021: "condensor_frontlens",
0x40000023: "field_stop_value",
0x40000024: "id_condensor_aperture",
0x40000025: "condensor_aperture",
0x40000026: "id_condensor_revolver",
0x40000027: "condensor_filter",
0x40000028: "id_transmission_filter1",
0x40000029: "id_transmission1",
0x40000030: "id_transmission_filter2",
0x40000031: "id_transmission2",
0x40000032: "repeat_bleach",
0x40000033: "enable_spot_bleach_pos",
0x40000034: "spot_bleach_posx",
0x40000035: "spot_bleach_posy",
0x40000036: "spot_bleach_posz",
0x40000037: "id_tubelens",
0x40000038: "id_tubelens_position",
0x40000039: "transmitted_light",
0x4000003a: "reflected_light",
0x4000003b: "simultan_grab_and_bleach",
0x4000003c: "bleach_pixel_time",
# laser
0x50000001: "name",
0x50000002: "acquire",
0x50000003: "power",
# detection_channel
0x70000001: "integration_mode",
0x70000002: "special_mode",
0x70000003: "detector_gain_first",
0x70000004: "detector_gain_last",
0x70000005: "amplifier_gain_first",
0x70000006: "amplifier_gain_last",
0x70000007: "amplifier_offs_first",
0x70000008: "amplifier_offs_last",
0x70000009: "pinhole_diameter",
0x7000000a: "counting_trigger",
0x7000000b: "acquire",
0x7000000c: "point_detector_name",
0x7000000d: "amplifier_name",
0x7000000e: "pinhole_name",
0x7000000f: "filter_set_name",
0x70000010: "filter_name",
0x70000013: "integrator_name",
0x70000014: "channel_name",
0x70000015: "detector_gain_bc1",
0x70000016: "detector_gain_bc2",
0x70000017: "amplifier_gain_bc1",
0x70000018: "amplifier_gain_bc2",
0x70000019: "amplifier_offset_bc1",
0x70000020: "amplifier_offset_bc2",
0x70000021: "spectral_scan_channels",
0x70000022: "spi_wavelength_start",
0x70000023: "spi_wavelength_stop",
0x70000026: "dye_name",
0x70000027: "dye_folder",
# illumination_channel
0x90000001: "name",
0x90000002: "power",
0x90000003: "wavelength",
0x90000004: "aquire",
0x90000005: "detchannel_name",
0x90000006: "power_bc1",
0x90000007: "power_bc2",
# beam_splitter
0xb0000001: "filter_set",
0xb0000002: "filter",
0xb0000003: "name",
# data_channel
0xd0000001: "name",
0xd0000003: "acquire",
0xd0000004: "color",
0xd0000005: "sample_type",
0xd0000006: "bits_per_sample",
0xd0000007: "ratio_type",
0xd0000008: "ratio_track1",
0xd0000009: "ratio_track2",
0xd000000a: "ratio_channel1",
0xd000000b: "ratio_channel2",
0xd000000c: "ratio_const1",
0xd000000d: "ratio_const2",
0xd000000e: "ratio_const3",
0xd000000f: "ratio_const4",
0xd0000010: "ratio_const5",
0xd0000011: "ratio_const6",
0xd0000012: "ratio_first_images1",
0xd0000013: "ratio_first_images2",
0xd0000014: "dye_name",
0xd0000015: "dye_folder",
0xd0000016: "spectrum",
0xd0000017: "acquire",
# timer
0x12000001: "name",
0x12000002: "description",
0x12000003: "interval",
0x12000004: "trigger_in",
0x12000005: "trigger_out",
0x12000006: "activation_time",
0x12000007: "activation_number",
# marker
0x14000001: "name",
0x14000002: "description",
0x14000003: "trigger_in",
0x14000004: "trigger_out",
}
# Map TIFF tag code to attribute name, default value, type, count, validator
TIFF_TAGS = {
254: ('new_subfile_type', 0, 4, 1, TIFF_SUBFILE_TYPES()),
255: ('subfile_type', None, 3, 1,
{0: 'undefined', 1: 'image', 2: 'reduced_image', 3: 'page'}),
256: ('image_width', None, 4, 1, None),
257: ('image_length', None, 4, 1, None),
258: ('bits_per_sample', 1, 3, 1, None),
259: ('compression', 1, 3, 1, TIFF_COMPESSIONS),
262: ('photometric', None, 3, 1, TIFF_PHOTOMETRICS),
266: ('fill_order', 1, 3, 1, {1: 'msb2lsb', 2: 'lsb2msb'}),
269: ('document_name', None, 2, None, None),
270: ('image_description', None, 2, None, None),
271: ('make', None, 2, None, None),
272: ('model', None, 2, None, None),
273: ('strip_offsets', None, 4, None, None),
274: ('orientation', 1, 3, 1, TIFF_ORIENTATIONS),
277: ('samples_per_pixel', 1, 3, 1, None),
278: ('rows_per_strip', 2**32-1, 4, 1, None),
279: ('strip_byte_counts', None, 4, None, None),
280: ('min_sample_value', None, 3, None, None),
281: ('max_sample_value', None, 3, None, None), # 2**bits_per_sample
282: ('x_resolution', None, 5, 1, None),
283: ('y_resolution', None, 5, 1, None),
284: ('planar_configuration', 1, 3, 1, {1: 'contig', 2: 'separate'}),
285: ('page_name', None, 2, None, None),
286: ('x_position', None, 5, 1, None),
287: ('y_position', None, 5, 1, None),
296: ('resolution_unit', 2, 4, 1, {1: 'none', 2: 'inch', 3: 'centimeter'}),
297: ('page_number', None, 3, 2, None),
305: ('software', None, 2, None, None),
306: ('datetime', None, 2, None, None),
315: ('artist', None, 2, None, None),
316: ('host_computer', None, 2, None, None),
317: ('predictor', 1, 3, 1, {1: None, 2: 'horizontal'}),
318: ('white_point', None, 5, 2, None),
319: ('primary_chromaticities', None, 5, 6, None),
320: ('color_map', None, 3, None, None),
322: ('tile_width', None, 4, 1, None),
323: ('tile_length', None, 4, 1, None),
324: ('tile_offsets', None, 4, None, None),
325: ('tile_byte_counts', None, 4, None, None),
338: ('extra_samples', None, 3, None,
{0: 'unspecified', 1: 'assocalpha', 2: 'unassalpha'}),
339: ('sample_format', 1, 3, 1, TIFF_SAMPLE_FORMATS),
340: ('smin_sample_value', None, None, None, None),
341: ('smax_sample_value', None, None, None, None),
347: ('jpeg_tables', None, 7, None, None),
530: ('ycbcr_subsampling', 1, 3, 2, None),
531: ('ycbcr_positioning', 1, 3, 1, None),
32995: ('sgi_matteing', None, None, 1, None),  # use extra_samples
32996: ('sgi_datatype', None, None, 1, None), # use sample_format
32997: ('image_depth', None, 4, 1, None),
32998: ('tile_depth', None, 4, 1, None),
33432: ('copyright', None, 1, None, None),
33445: ('md_file_tag', None, 4, 1, None),
33446: ('md_scale_pixel', None, 5, 1, None),
33447: ('md_color_table', None, 3, None, None),
33448: ('md_lab_name', None, 2, None, None),
33449: ('md_sample_info', None, 2, None, None),
33450: ('md_prep_date', None, 2, None, None),
33451: ('md_prep_time', None, 2, None, None),
33452: ('md_file_units', None, 2, None, None),
33550: ('model_pixel_scale', None, 12, 3, None),
33922: ('model_tie_point', None, 12, None, None),
34665: ('exif_ifd', None, None, 1, None),
34735: ('geo_key_directory', None, 3, None, None),
34736: ('geo_double_params', None, 12, None, None),
34737: ('geo_ascii_params', None, 2, None, None),
34853: ('gps_ifd', None, None, 1, None),
37510: ('user_comment', None, None, None, None),
42112: ('gdal_metadata', None, 2, None, None),
42113: ('gdal_nodata', None, 2, None, None),
50289: ('mc_xy_position', None, 12, 2, None),
50290: ('mc_z_position', None, 12, 1, None),
50291: ('mc_xy_calibration', None, 12, 3, None),
50292: ('mc_lens_lem_na_n', None, 12, 3, None),
50293: ('mc_channel_name', None, 1, None, None),
50294: ('mc_ex_wavelength', None, 12, 1, None),
50295: ('mc_time_stamp', None, 12, 1, None),
50838: ('imagej_byte_counts', None, None, None, None),
65200: ('flex_xml', None, 2, None, None),
# code: (attribute name, default value, type, count, validator)
}
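# Example (illustrative sketch, not used elsewhere in this module): how the
# (attribute name, default value, type, count, validator) tuples above can be
# applied to a raw tag code/value pair.  The validator, when present, is a
# mapping from enumerated integer values to symbolic names.
def _example_interpret_tag(code, value, tags=TIFF_TAGS):
    """Return (attribute_name, interpreted_value) for a raw TIFF tag.

    Unknown codes are passed through unchanged as ('tag_<code>', value).
    """
    if code not in tags:
        return 'tag_%i' % code, value
    name, default, ttype, count, validator = tags[code]
    if value is None:
        value = default
    if isinstance(validator, dict):
        # e.g. _example_interpret_tag(284, 1) -> ('planar_configuration', 'contig')
        value = validator.get(value, value)
    return name, value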
# Map custom TIFF tag codes to attribute names and import functions
CUSTOM_TAGS = {
700: ('xmp', read_bytes),
34377: ('photoshop', read_numpy),
33723: ('iptc', read_bytes),
34675: ('icc_profile', read_bytes),
33628: ('uic1tag', read_uic1tag), # Universal Imaging Corporation STK
33629: ('uic2tag', read_uic2tag),
33630: ('uic3tag', read_uic3tag),
33631: ('uic4tag', read_uic4tag),
34361: ('mm_header', read_mm_header), # Olympus FluoView
34362: ('mm_stamp', read_mm_stamp),
34386: ('mm_user_block', read_bytes),
34412: ('cz_lsm_info', read_cz_lsm_info), # Carl Zeiss LSM
43314: ('nih_image_header', read_nih_image_header),
# 40001: ('mc_ipwinscal', read_bytes),
40100: ('mc_id_old', read_bytes),
50288: ('mc_id', read_bytes),
50296: ('mc_frame_properties', read_bytes),
50839: ('imagej_metadata', read_bytes),
51123: ('micromanager_metadata', read_json),
}
# Max line length of printed output
PRINT_LINE_LEN = 79
def imshow(data, title=None, vmin=0, vmax=None, cmap=None,
bitspersample=None, photometric='rgb', interpolation='nearest',
dpi=96, figure=None, subplot=111, maxdim=8192, **kwargs):
"""Plot n-dimensional images using matplotlib.pyplot.
Return figure, subplot and plot axis.
Requires pyplot already imported ``from matplotlib import pyplot``.
Parameters
----------
bitspersample : int or None
Number of bits per channel in integer RGB images.
photometric : {'miniswhite', 'minisblack', 'rgb', or 'palette'}
The color space of the image data.
title : str
Window and subplot title.
figure : matplotlib.figure.Figure (optional).
Matplotlib figure to use for plotting.
subplot : int
A matplotlib.pyplot.subplot axis.
maxdim : int
Maximum image size in any dimension.
kwargs : optional
Arguments for matplotlib.pyplot.imshow.
"""
#if photometric not in ('miniswhite', 'minisblack', 'rgb', 'palette'):
# raise ValueError("Can't handle %s photometrics" % photometric)
# TODO: handle photometric == 'separated' (CMYK)
isrgb = photometric in ('rgb', 'palette')
data = numpy.atleast_2d(data.squeeze())
data = data[(slice(0, maxdim), ) * len(data.shape)]
dims = data.ndim
if dims < 2:
raise ValueError("not an image")
elif dims == 2:
dims = 0
isrgb = False
else:
if isrgb and data.shape[-3] in (3, 4):
data = numpy.swapaxes(data, -3, -2)
data = numpy.swapaxes(data, -2, -1)
elif not isrgb and (data.shape[-1] < data.shape[-2] // 16 and
data.shape[-1] < data.shape[-3] // 16 and
data.shape[-1] < 5):
data = numpy.swapaxes(data, -3, -1)
data = numpy.swapaxes(data, -2, -1)
isrgb = isrgb and data.shape[-1] in (3, 4)
dims -= 3 if isrgb else 2
if photometric == 'palette' and isrgb:
datamax = data.max()
if datamax > 255:
data >>= 8 # possible precision loss
data = data.astype('B')
elif data.dtype.kind in 'ui':
if not (isrgb and data.dtype.itemsize <= 1) or bitspersample is None:
try:
bitspersample = int(math.ceil(math.log(data.max(), 2)))
except Exception:
bitspersample = data.dtype.itemsize * 8
elif not isinstance(bitspersample, int):
# bitspersample can be tuple, e.g. (5, 6, 5)
bitspersample = data.dtype.itemsize * 8
datamax = 2**bitspersample
if isrgb:
if bitspersample < 8:
data <<= 8 - bitspersample
elif bitspersample > 8:
data >>= bitspersample - 8 # precision loss
data = data.astype('B')
elif data.dtype.kind == 'f':
datamax = data.max()
if isrgb and datamax > 1.0:
if data.dtype.char == 'd':
data = data.astype('f')
data /= datamax
elif data.dtype.kind == 'b':
datamax = 1
elif data.dtype.kind == 'c':
raise NotImplementedError("complex type") # TODO: handle complex types
if not isrgb:
if vmax is None:
vmax = datamax
if vmin is None:
if data.dtype.kind == 'i':
dtmin = numpy.iinfo(data.dtype).min
vmin = numpy.min(data)
if vmin == dtmin:
vmin = numpy.min(data[data > dtmin])
if data.dtype.kind == 'f':
dtmin = numpy.finfo(data.dtype).min
vmin = numpy.min(data)
if vmin == dtmin:
vmin = numpy.min(data[data > dtmin])
else:
vmin = 0
pyplot = sys.modules['matplotlib.pyplot']
if figure is None:
pyplot.rc('font', family='sans-serif', weight='normal', size=8)
figure = pyplot.figure(dpi=dpi, figsize=(10.3, 6.3), frameon=True,
facecolor='1.0', edgecolor='w')
try:
figure.canvas.manager.window.title(title)
except Exception:
pass
pyplot.subplots_adjust(bottom=0.03*(dims+2), top=0.9,
left=0.1, right=0.95, hspace=0.05, wspace=0.0)
subplot = pyplot.subplot(subplot)
if title:
try:
title = unicode(title, 'Windows-1252')
except TypeError:
pass
pyplot.title(title, size=11)
if cmap is None:
if data.dtype.kind in 'ubf' or vmin == 0:
cmap = 'cubehelix'
else:
cmap = 'coolwarm'
if photometric == 'miniswhite':
cmap += '_r'
image = pyplot.imshow(data[(0, ) * dims].squeeze(), vmin=vmin, vmax=vmax,
cmap=cmap, interpolation=interpolation, **kwargs)
if not isrgb:
pyplot.colorbar() # panchor=(0.55, 0.5), fraction=0.05
def format_coord(x, y):
# callback function to format coordinate display in toolbar
x = int(x + 0.5)
y = int(y + 0.5)
try:
if dims:
return "%s @ %s [%4i, %4i]" % (cur_ax_dat[1][y, x],
current, x, y)
else:
return "%s @ [%4i, %4i]" % (data[y, x], x, y)
except IndexError:
return ""
pyplot.gca().format_coord = format_coord
if dims:
current = list((0, ) * dims)
cur_ax_dat = [0, data[tuple(current)].squeeze()]
sliders = [pyplot.Slider(
pyplot.axes([0.125, 0.03*(axis+1), 0.725, 0.025]),
'Dimension %i' % axis, 0, data.shape[axis]-1, 0, facecolor='0.5',
valfmt='%%.0f [%i]' % data.shape[axis]) for axis in range(dims)]
for slider in sliders:
slider.drawon = False
def set_image(current, sliders=sliders, data=data):
# change image and redraw canvas
cur_ax_dat[1] = data[tuple(current)].squeeze()
image.set_data(cur_ax_dat[1])
for ctrl, index in zip(sliders, current):
ctrl.eventson = False
ctrl.set_val(index)
ctrl.eventson = True
figure.canvas.draw()
def on_changed(index, axis, data=data, current=current):
# callback function for slider change event
index = int(round(index))
cur_ax_dat[0] = axis
if index == current[axis]:
return
if index >= data.shape[axis]:
index = 0
elif index < 0:
index = data.shape[axis] - 1
current[axis] = index
set_image(current)
def on_keypressed(event, data=data, current=current):
# callback function for key press event
key = event.key
axis = cur_ax_dat[0]
if str(key) in '0123456789':
on_changed(key, axis)
elif key == 'right':
on_changed(current[axis] + 1, axis)
elif key == 'left':
on_changed(current[axis] - 1, axis)
elif key == 'up':
cur_ax_dat[0] = 0 if axis == len(data.shape)-1 else axis + 1
elif key == 'down':
cur_ax_dat[0] = len(data.shape)-1 if axis == 0 else axis - 1
elif key == 'end':
on_changed(data.shape[axis] - 1, axis)
elif key == 'home':
on_changed(0, axis)
figure.canvas.mpl_connect('key_press_event', on_keypressed)
for axis, ctrl in enumerate(sliders):
ctrl.on_changed(lambda k, a=axis: on_changed(k, a))
return figure, subplot, image
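# Minimal usage sketch for imshow (illustrative only; per the docstring it
# assumes matplotlib.pyplot has already been imported before the call):
#
#   from matplotlib import pyplot
#   img = numpy.random.randint(0, 256, (256, 256)).astype('uint8')
#   imshow(img, title='random noise', photometric='minisblack')
#   pyplot.show()
#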
def _app_show():
"""Block the GUI. For use as skimage plugin."""
pyplot = sys.modules['matplotlib.pyplot']
pyplot.show()
def main(argv=None):
"""Command line usage main function."""
if float(sys.version[0:3]) < 2.6:
print("This script requires Python version 2.6 or better.")
print("This is Python version %s" % sys.version)
return 0
if argv is None:
argv = sys.argv
import optparse
parser = optparse.OptionParser(
usage="usage: %prog [options] path",
description="Display image data in TIFF files.",
version="%%prog %s" % __version__)
opt = parser.add_option
opt('-p', '--page', dest='page', type='int', default=-1,
help="display single page")
opt('-s', '--series', dest='series', type='int', default=-1,
help="display series of pages of same shape")
opt('--nomultifile', dest='nomultifile', action='store_true',
default=False, help="don't read OME series from multiple files")
opt('--noplot', dest='noplot', action='store_true', default=False,
help="don't display images")
opt('--interpol', dest='interpol', metavar='INTERPOL', default='bilinear',
help="image interpolation method")
opt('--dpi', dest='dpi', type='int', default=96,
help="set plot resolution")
opt('--debug', dest='debug', action='store_true', default=False,
help="raise exception on failures")
opt('--test', dest='test', action='store_true', default=False,
help="try read all images in path")
opt('--doctest', dest='doctest', action='store_true', default=False,
help="runs the docstring examples")
opt('-v', '--verbose', dest='verbose', action='store_true', default=True)
opt('-q', '--quiet', dest='verbose', action='store_false')
settings, path = parser.parse_args()
path = ' '.join(path)
if settings.doctest:
import doctest
doctest.testmod()
return 0
if not path:
parser.error("No file specified")
if settings.test:
test_tifffile(path, settings.verbose)
return 0
if any(i in path for i in '?*'):
path = glob.glob(path)
if not path:
print('no files match the pattern')
return 0
# TODO: handle image sequences
#if len(path) == 1:
path = path[0]
print("Reading file structure...", end=' ')
start = time.time()
try:
tif = TiffFile(path, multifile=not settings.nomultifile)
except Exception as e:
if settings.debug:
raise
else:
print("\n", e)
sys.exit(0)
print("%.3f ms" % ((time.time()-start) * 1e3))
if tif.is_ome:
settings.norgb = True
images = [(None, tif[0 if settings.page < 0 else settings.page])]
if not settings.noplot:
print("Reading image data... ", end=' ')
def notnone(x):
return next(i for i in x if i is not None)
start = time.time()
try:
if settings.page >= 0:
images = [(tif.asarray(key=settings.page),
tif[settings.page])]
elif settings.series >= 0:
images = [(tif.asarray(series=settings.series),
notnone(tif.series[settings.series].pages))]
else:
images = []
for i, s in enumerate(tif.series):
try:
images.append(
(tif.asarray(series=i), notnone(s.pages)))
except ValueError as e:
images.append((None, notnone(s.pages)))
if settings.debug:
raise
else:
print("\n* series %i failed: %s... " % (i, e),
end='')
print("%.3f ms" % ((time.time()-start) * 1e3))
except Exception as e:
if settings.debug:
raise
else:
print(e)
tif.close()
print("\nTIFF file:", tif)
print()
for i, s in enumerate(tif.series):
print ("Series %i" % i)
print(s)
print()
for i, page in images:
print(page)
print(page.tags)
if page.is_palette:
print("\nColor Map:", page.color_map.shape, page.color_map.dtype)
for attr in ('cz_lsm_info', 'cz_lsm_scan_info', 'uic_tags',
'mm_header', 'imagej_tags', 'micromanager_metadata',
'nih_image_header'):
if hasattr(page, attr):
print("", attr.upper(), Record(getattr(page, attr)), sep="\n")
print()
if page.is_micromanager:
print('MICROMANAGER_FILE_METADATA')
print(Record(tif.micromanager_metadata))
if images and not settings.noplot:
try:
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot
except ImportError as e:
warnings.warn("failed to import matplotlib.\n%s" % e)
else:
for img, page in images:
if img is None:
continue
vmin, vmax = None, None
if 'gdal_nodata' in page.tags:
try:
vmin = numpy.min(img[img > float(page.gdal_nodata)])
except ValueError:
pass
if page.is_stk:
try:
vmin = page.uic_tags['min_scale']
vmax = page.uic_tags['max_scale']
except KeyError:
pass
else:
if vmax <= vmin:
vmin, vmax = None, None
title = "%s\n %s" % (str(tif), str(page))
imshow(img, title=title, vmin=vmin, vmax=vmax,
bitspersample=page.bits_per_sample,
photometric=page.photometric,
interpolation=settings.interpol,
dpi=settings.dpi)
pyplot.show()
TIFFfile = TiffFile # backwards compatibility
if sys.version_info[0] > 2:
basestring = str, bytes
unicode = str
if __name__ == "__main__":
sys.exit(main())
| gpl-3.0 |
petebachant/daqmx | examples/pulse_example_motion.py | 1 | 3189 | # -*- coding: utf-8 -*-
"""
Created on Tue May 21 21:06:37 2013
@author: Pete
This program commands a National Instruments counter output device to send a
pulse after a target position (read from the ACS controller) is reached.
"""
import daqmx
import acsc
import time
import numpy as np
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
def main():
taskhandle = daqmx.TaskHandle()
daqmx.CreateTask("", taskhandle)
phys_chan = "Dev1/ctr0"
#Timing parameters
rate = 200 # Pulse rate in Hz
initialdelay = 0
lowtime = 1.0/rate/2   # float division: 200 Hz -> 2.5 ms low time
hightime = 1.0/rate/2  # float division: 200 Hz -> 2.5 ms high time
daqmx.CreateCOPulseChanTime(taskhandle, phys_chan, "", daqmx.Val_Seconds,
daqmx.Val_Low, initialdelay, lowtime, hightime,
False)
# Set up communication with motion controller
simulator = True
# Parameters for plotting
plotting = False
plot_dynamic = True
if simulator == True:
hcomm = acsc.OpenCommDirect()
else:
hcomm = acsc.OpenCommEthernetTCP("10.0.0.100", 701)
axis = 5
buffno = 6
target = 10000
timeout = 10
# Initialize arrays for storing time and position
t = np.array(0)
x = np.array(0)
if plotting == True and plot_dynamic == True:
plt.ioff()
fig = plt.figure()
ax = fig.add_subplot(111)
# plt.xlim(0, timeout)
# plt.ylim(0, target)
line, = ax.plot([], [])
fig.show()
if hcomm == acsc.INVALID:
print "Cannot connect to controller. Error:", acsc.GetLastError()
else:
acsc.Enable(hcomm, axis)
rpos = acsc.GetRPosition(hcomm, axis)
x = rpos
t0 = time.time()
if simulator == True:
acsc.ToPoint(hcomm, 0, axis, target+50000)
else:
acsc.RunBuffer(hcomm, buffno, None, None)
while True:
rpos = acsc.GetRPosition(hcomm, axis)
if rpos >= target: break
x = np.append(x, rpos)
t = np.append(t, time.time() - t0)
if plotting == True and plot_dynamic == True:
line.set_xdata(t)
line.set_ydata(x)
ax.relim()
ax.autoscale_view()
fig.canvas.draw()
print "Axis is", acsc.GetAxisState(hcomm, axis)+'...'
if time.time() - t0 > timeout:
print "Motion timeout"
print "Final axis position:", rpos
break
time.sleep(0.001)
print "Target reached. Sending trigger pulse to", phys_chan + "..."
daqmx.StartTask(taskhandle, fatalerror=False)
daqmx.WaitUntilTaskDone(taskhandle, timeout=10, fatalerror=False)
daqmx.ClearTask(taskhandle, fatalerror=False)
acsc.CloseComm(hcomm)
print "Triggered at", np.max(x)
if plotting == True:
if plot_dynamic == False:
plt.plot(t, x)
plt.show()
return t, x
if __name__ == "__main__":
t, x = main() | gpl-2.0 |
aszek/fun-with-oeis | code/7.py | 1 | 1242 | from oeis import OEIS
import json
def compute():
o = OEIS(None, 12)
names = [
'prime',
'graph',
'automorphism',
'group',
'space',
'code',
'elliptic',
'algorithm',
'distribution',
'probability',
'differential',
'stochastic',
'poset',
'invariant',
'automatic',
'metric',
'binary',
'field',
'fraction',
'operator',
'norm',
'set',
'cardinality',
'modulo',
'quantum',
'convex',
'manifold',
'variety',
'category',
'process',
'extension',
'equivalence',
'algebraic',
'topology',
'kernel',
'approximation'
]
stat = []
for name in names:
res = o.count(name)
print name, res
if res > 0:
stat.append((name, res))
o.pause()
stat.sort(key = lambda x: x[1])
with open('../artifacts/7.json', 'w') as outfile:
json.dump(stat, outfile)
def plot():
with open('../artifacts/7.json', 'r') as infile:
stat = json.load(infile)
import matplotlib.pyplot as plt
plt.barh(range(len(stat)), [_[1] for _ in stat], align='center')
plt.yticks(range(len(stat)), [_[0] for _ in stat])
plt.show()
#compute()
plot() | gpl-3.0 |
rodluger/planetplanet | scripts/hist_mutual.py | 1 | 15330 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
hist_mutual.py |github|
-----------------------
Histograms of the mutual transit events in TRAPPIST-1. Shows histograms
of the fractional depth and duration of these events for all pairs of
planets.
TRAPPIST-1b
~~~~~~~~~~~
.. image:: /b_mutual.jpg
:width: 400px
:align: center
TRAPPIST-1c
~~~~~~~~~~~
.. image:: /c_mutual.jpg
:width: 400px
:align: center
TRAPPIST-1d
~~~~~~~~~~~
.. image:: /d_mutual.jpg
:width: 400px
:align: center
TRAPPIST-1e
~~~~~~~~~~~
.. image:: /e_mutual.jpg
:width: 400px
:align: center
TRAPPIST-1f
~~~~~~~~~~~
.. image:: /f_mutual.jpg
:width: 400px
:align: center
TRAPPIST-1g
~~~~~~~~~~~
.. image:: /g_mutual.jpg
:width: 400px
:align: center
TRAPPIST-1h
~~~~~~~~~~~
.. image:: /h_mutual.jpg
:width: 400px
:align: center
.. role:: raw-html(raw)
:format: html
.. |github| replace:: :raw-html:`<a href = "https://github.com/rodluger/planetplanet/blob/master/scripts/hist_mutual.py"><i class="fa fa-github" aria-hidden="true"></i></a>`
'''
from __future__ import division, print_function, absolute_import, \
unicode_literals
import os
import subprocess
import planetplanet
from planetplanet import jwst
from planetplanet import Trappist1
from planetplanet.constants import *
from planetplanet.pool import Pool
import matplotlib
import matplotlib.pyplot as pl
from matplotlib.ticker import FuncFormatter
import numpy as np
import corner
from tqdm import tqdm
from scipy.stats import norm
datapath = os.path.join(os.path.dirname(os.path.dirname(
os.path.abspath(planetplanet.__file__))),
'scripts', 'data')
histpath = os.path.join(os.path.dirname(os.path.dirname(
os.path.abspath(planetplanet.__file__))),
'scripts')
if not os.path.exists(datapath):
os.makedirs(datapath)
def _test():
'''
This routine is too expensive to test on Travis, so I'm
bypassing it for now.
'''
pass
def Submit(queue = None, email = None, walltime = 8, nodes = 5, ppn = 12,
mpn = None, nsamp = 50000, batch_size = 30, nproc = None):
'''
Submits a PBS cluster job to run :py:func:`Compute` in parallel.
:param str queue: The name of the queue to submit to. \
Default :py:obj:`None`
:param str email: The email to send job status notifications to. \
Default :py:obj:`None`
:param int walltime: The number of hours to request. Default `8`
:param int nodes: The number of nodes to request. Default `5`
:param int ppn: The number of processors per node to request. Default `12`
:param int nsamp: The number of prior samples to draw. Default `50,000`
:param int batch_size: Size of each batch used in the parallelization. \
Default `30`
:param int mpn: Memory per node in GB to request. Default no setting.
:param int nproc: Number of processes to spawn. Default is the total \
number of cores requested (nodes * ppn).
'''
if nproc is None:
nproc = ppn * nodes
str_w = 'walltime=%d:00:00' % walltime
if mpn is not None:
str_n = 'nodes=%d:ppn=%d,feature=%dcore,mem=%dgb' % \
(nodes, ppn, ppn, mpn * nodes)
else:
str_n = 'nodes=%d:ppn=%d,feature=%dcore' % (nodes, ppn, ppn)
str_v = 'NPROC=%d,HISTPATH=%s,NSAMP=%d,BATCHSZ=%d' % \
(nproc, histpath, nsamp, batch_size)
str_name = 'planetplanet'
str_out = 'hist_mutual.log'
qsub_args = ['qsub', 'hist_mutual.pbs',
'-v', str_v,
'-o', str_out,
'-j', 'oe',
'-N', str_name,
'-l', str_n,
'-l', str_w]
if email is not None:
qsub_args += ['-M', email, '-m', 'ae']
if queue is not None:
qsub_args += ['-q', queue]
print("Submitting the job...")
subprocess.call(qsub_args)
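# With the defaults above (5 nodes x 12 ppn, 8 hour walltime, no mpn, no
# queue or email), the call is roughly equivalent to the command line below
# (HISTPATH is filled in at run time; shown here schematically):
#
#   qsub hist_mutual.pbs \
#       -v NPROC=60,HISTPATH=<scripts dir>,NSAMP=50000,BATCHSZ=30 \
#       -o hist_mutual.log -j oe -N planetplanet \
#       -l nodes=5:ppn=12,feature=12core -l walltime=8:00:00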
class _FunctionWrapper(object):
'''
A simple function wrapper class. Stores :py:obj:`args` and :py:obj:`kwargs`
and allows an arbitrary function to be called via :py:func:`map`.
Used internally.
'''
def __init__(self, f, *args, **kwargs):
'''
'''
self.f = f
self.args = args
self.kwargs = kwargs
def __call__(self, x):
'''
'''
return self.f(*self.args, **self.kwargs)
def _Parallelize(nsamp, batch_size):
'''
Runs the actual parallelized computations. Used internally.
'''
# Get our function wrapper
m = _FunctionWrapper(Compute, nsamp = batch_size,
progress_bar = False)
# Parallelize. We will run `N` iterations
N = int(np.ceil(nsamp / batch_size))
with Pool() as pool:
pool.map(m, range(N))
def histogram(system, tstart, tend, dt = 0.0001):
'''
Computes statistical properties of mutual events (PPOs occuring on the face
of the star).
:param system: A system instance.
:type system: :py:obj:`planetplanet.structs.System`
:param float tstart: The integration start time (BJD − 2,450,000)
:param float tend: The integration end time (BJD − 2,450,000)
:param float dt: The time resolution in days. Occultations shorter \
than this will not be registered.
'''
# Compute the orbits
time = np.arange(tstart, tend, dt)
system.compute_orbits(time)
pairs = []
durs = []
depths = []
for bi, body in enumerate(system.bodies[1:]):
# Loop over all times w/ occultations of the star
inds = np.where(system.bodies[0].occultor)[0]
for i in inds:
# Get all bodies currently occulting the star
occultors = []
for occ in range(1, len(system.bodies)):
if (system.bodies[0].occultor[i] & 2 ** occ):
occultors.append(occ)
# Check if any of these occult each other
if len(occultors) > 1:
for occ1 in occultors:
for occ2 in occultors:
if system.bodies[occ1].occultor[i] & 2 ** occ2:
# Sort the planet indices
occ1, occ2 = sorted([occ1, occ2])
# Is this a new occultation?
if (len(pairs)==0) or (pairs[-1] != [occ1, occ2]):
pairs.append([occ1, occ2])
durs.append(0.)
depths.append(0.)
# Update the maximum depth. Based on
# http://mathworld.wolfram.com/
# Circle-CircleIntersection.html
dx2 = (system.bodies[occ1].x[i] -
system.bodies[occ2].x[i]) ** 2
dy2 = (system.bodies[occ1].y[i] -
system.bodies[occ2].y[i]) ** 2
d = np.sqrt(dx2 + dy2)
r1 = system.bodies[occ1]._r
r2 = system.bodies[occ2]._r
A = r1 ** 2 * np.arccos((d ** 2 + r1 ** 2 - r2 ** 2)
/ (2 * d * r1)) \
+ r2 ** 2 * np.arccos((d ** 2 + r2 ** 2 - r1 ** 2)
/ (2 * d * r2)) \
- 0.5 * np.sqrt((-d + r1 + r2) *
(d + r1 - r2) *
(d - r1 + r2) *
(d + r1 + r2))
Astar = np.pi * (system.bodies[0]._r) ** 2
depth = A / Astar
depths[-1] = max(depths[-1], depth)
# Update the duration
durs[-1] += dt
return np.array(pairs, dtype = int), \
np.array(durs, dtype = float), \
np.array(depths, dtype = float)
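# Reference sketch of the depth calculation used above: the maximum depth of
# a planet-planet occultation is taken to be the area of the circle-circle
# intersection ("lens") divided by the area of the stellar disk.  This helper
# is illustrative only and is not called by `histogram`, which keeps the
# computation inline; it assumes partial overlap, abs(r1 - r2) < d < r1 + r2.
def _circle_overlap_area(d, r1, r2):
    """Area of overlap of two circles of radii `r1` and `r2` whose centers
    are separated by a distance `d` (all in the same units)."""
    A = r1 ** 2 * np.arccos((d ** 2 + r1 ** 2 - r2 ** 2) / (2 * d * r1)) \
      + r2 ** 2 * np.arccos((d ** 2 + r2 ** 2 - r1 ** 2) / (2 * d * r2)) \
      - 0.5 * np.sqrt((-d + r1 + r2) * (d + r1 - r2) *
                      (d - r1 + r2) * (d + r1 + r2))
    return A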
def Compute(nsamp = 100, nbody = True, progress_bar = True, **kwargs):
'''
Runs the simulations.
:param int nsamp: The number of prior samples to draw. Default `100`
:param bool nbody: Use the N-Body solver? Default :py:obj:`True`
:param bool progress_bar: Display a progress bar? Default :py:obj:`True`
'''
# Draw samples from the prior
pairs = np.empty([0, 2], dtype = int)
durs = np.array([], dtype = float)
depths = np.array([], dtype = float)
if progress_bar:
wrap = tqdm
else:
wrap = lambda x: x
for n in wrap(range(nsamp)):
# Instantiate the Trappist-1 system
system = Trappist1(sample = True, nbody = nbody,
quiet = True, **kwargs)
system.settings.timestep = 1. / 24.
# Run!
try:
p, t, d = histogram(system, OCTOBER_08_2016, OCTOBER_08_2016 + 365)
except:
print("ERROR in routine `hist.Compute()`")
continue
if len(p):
pairs = np.vstack([pairs, p])
durs = np.append(durs, t)
depths = np.append(depths, d)
# Save
n = 0
while os.path.exists(os.path.join(datapath, 'hist_mutual%03d.npz' % n)):
n += 1
np.savez(os.path.join(datapath, 'hist_mutual%03d.npz' % n),
pairs = pairs, durs = durs, depths = depths)
def MergeFiles():
'''
Merge all the `npz` savesets into a single one for faster plotting.
'''
# Load
pairs = np.empty([0, 2], dtype = int)
durs = np.array([], dtype = float)
depths = np.array([], dtype = float)
print("Loading...")
for n in tqdm(range(1000)):
if os.path.exists(os.path.join(datapath, 'hist_mutual%03d.npz' % n)):
# Skip corrupt files
try:
data = np.load(os.path.join(datapath, 'hist_mutual%03d.npz' % n))
os.remove(os.path.join(datapath, 'hist_mutual%03d.npz' % n))
data['pairs'][0]
data['durs'][0]
data['depths'][0]
except:
continue
else:
break
pairs = np.vstack([pairs, data['pairs']])
durs = np.append(durs, data['durs'])
depths = np.append(depths, data['depths'])
# Save as one big file
if n > 0:
print("Saving...")
np.savez(os.path.join(datapath,'hist_mutual000.npz'),
pairs = pairs, durs = durs, depths = depths)
def Plot():
'''
'''
# Load
pairs = np.empty([0, 2], dtype = int)
durs = np.array([], dtype = float)
depths = np.array([], dtype = float)
print("Loading...")
for n in tqdm(range(1000)):
if os.path.exists(os.path.join(datapath, 'hist_mutual%03d.npz' % n)):
# Skip corrupt files
try:
data = np.load(os.path.join(datapath, 'hist_mutual%03d.npz' % n))
data['pairs'][0]
data['durs'][0]
data['depths'][0]
except:
continue
else:
if n == 0:
raise Exception("Please run `Compute()` first.")
break
pairs = np.vstack([pairs, data['pairs']])
durs = np.append(durs, data['durs'])
depths = np.append(depths, data['depths'])
# Dummy system to get colors
system = Trappist1()
colors = [system.bodies[n].color for n in range(1, 8)]
# For the paper, we ran 30,000 simulations, so the average
# number of mutual transits per year is...
print("Mutual transits per year: %.3f" % (len(pairs) / 30000.))
# Loop over all planets
for k in range(1, 8):
# Indices of events involving this planet
inds = np.where((pairs[:,0] == k) | (pairs[:,1] == k))[0]
# Again, for the 30,000 simulations we ran...
print("%s: %.3f" % (system.bodies[k].name, len(pairs[inds]) / 30000.))
# Duration
dt = durs[inds] / MINUTE
# Depth
d = depths[inds] * 1e2
# Corner plot
samples = np.vstack((dt, d)).T
fig = corner.corner(samples, plot_datapoints = False,
range = [(0, 60), (0, 1)],
labels = ["Duration [min]",
"Depth [%]"],
bins = 30,
hist_kwargs = {'color': 'w'})
# Indices of events involving each of the planets
pinds = [[] for j in range(1, 8)]
for j in range(1, 8):
if j != k:
pinds[j - 1] = np.where((pairs[inds,0] == j) | (pairs[inds,1] == j))[0]
# Duration stacked histogram
n, _, _ = fig.axes[0].hist([dt[p] for p in pinds], bins = 30,
range = (0, 60),
stacked = True,
normed = 1,
color = colors,
alpha = 0.5)
maxn = np.max(np.array(n)[-1])
fig.axes[0].hist(dt, bins = 30, range = (0, 60), normed = 1,
color = 'k', alpha = 1, histtype = 'step')
fig.axes[0].set_ylim(0, 1.1 * maxn)
# Depth stacked histogram
n, _, _ = fig.axes[3].hist([d[p] for p in pinds], bins = 30,
range = (0, 1),
stacked = True,
normed = 1,
color = colors,
alpha = 0.5)
maxn = np.max(np.array(n)[-1])
fig.axes[3].hist(d, bins = 30, range = (0, 1), normed = 1,
color = 'k', alpha = 1, histtype = 'step')
fig.axes[3].set_ylim(0, 1.1 * maxn)
# Tweak appearance
for i, ax in enumerate(fig.axes):
ax.set_xlabel(ax.get_xlabel(), fontsize = 14, fontweight = 'bold')
ax.set_ylabel(ax.get_ylabel(), fontsize = 14, fontweight = 'bold')
for tick in ax.get_xticklabels() + ax.get_yticklabels():
tick.set_fontsize(12)
# HACK: Legend for planet `b`
if k == 1:
for occultor in [2,3,4,5,7]:
fig.axes[0].axhline(-1, color = system.bodies[occultor].color,
lw = 4, alpha = 0.5,
label = system.bodies[occultor].name)
fig.axes[0].legend(loc = 'upper right',
fontsize = 8, borderpad = 1)
# Save!
fig.savefig('%s_mutual.pdf' % system.bodies[k].name,
bbox_inches = 'tight') | gpl-3.0 |
DouglasLeeTucker/DECam_PGCM | bin/sdss_galex_transform_u.py | 1 | 45529 | #!/usr/bin/env python
"""
sdss_galex_transform_u.py
Example:
sdss_galex_transform_u --help
sdss_galex_transform_u.py --matchFile sdssdr13GUVCat_297.csv --verbose 2
"""
##################################
import numpy as np
import pandas as pd
import math
from scipy import interpolate
from scipy.optimize import leastsq
import os
import sys
import glob
import datetime
import extinction
import matplotlib.pyplot as plt
import plotly
from plotly.offline import download_plotlyjs, plot, iplot
import plotly.graph_objs as go
import healpy as hp
import healpixTools
import paramFile
def main():
import argparse
import time
"""Create command line arguments"""
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--matchFile', help='name of the input CSV match file', default='default')
parser.add_argument('--resultsFile', help='name of the fitting results output CSV file', default='default')
parser.add_argument('--planFile', help='name of the input plan file', default='sdss_galex_transform_u.par')
parser.add_argument('--verbose', help='verbosity level of output to screen (0,1,2,...)', default=0, type=int)
args = parser.parse_args()
if args.verbose > 0: print args
status = sdss_galex_transform_u(args)
return status
##################################
# sdss_galex_transform_u
#
def sdss_galex_transform_u(args):
import os
import sys
import paramFile
if args.verbose>0:
print
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print 'sdss_galex_transform_u'
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print
# Read contents of planFile into python dictionary...
planDict = paramFile.readParamFile(args.planFile, args.verbose)
# Check that all the required keywords were found...
requiredKeywordList = ['norder',
'nsigma',
'niter',
'matchDir',
'matchFile',
'resultsFile']
flag = 0
for requiredKeyword in requiredKeywordList:
if requiredKeyword not in planDict:
print """Required keyword '%s' not in planFile %s""" % (requiredKeyword, args.planFile)
flag = flag + 1
if flag > 0:
print 'Returning with error code 1 now...'
return 1
# Extract the relevant info from the planDict...
# Extract norder...
# (Default is 2)
if planDict['norder'].lower() == 'default':
norder = 2
else:
norder = int(planDict['norder'])
if args.verbose > 0:
print 'norder: ', norder
# Extract nsigma...
# (Default is 3.0)
if planDict['nsigma'].lower() == 'default':
nsigma = 3.0
else:
nsigma = float(planDict['nsigma'])
if args.verbose > 0:
print 'nsigma: ', nsigma
# Extract niter...
# (Default is 3)
if planDict['niter'].lower() == 'default':
niter = 3
else:
niter = int(planDict['niter'])
if args.verbose > 0:
print 'niter: ', niter
# Grab matchFile name from command-line argument list....
# If it is not set, then extract the full path
# (matchDir+matchFile) from the paramFile...
matchFile = args.matchFile
if matchFile == 'default':
# Extract the name of the matchDir...
matchDir = planDict['matchDir']
if matchDir.lower() == 'default':
matchDir = '/data/des20.b/data/sallam/GALEX/GUVCat_AIS_Bianchietal2017/DLT-03-07-18/sdssdr13GUVCat'
# Extract the name of the matchFile...
matchFile = planDict['matchFile']
if matchFile.lower() == 'default':
matchFile = 'sdssdr13GUVCat_297.csv'
# Create the full path of the matchFile
matchFile = os.path.join(matchDir,matchFile)
# Check to make sure matchFile exists...
if os.path.isfile(matchFile)==False:
print """matchFile %s does not exist...""" % (matchFile)
print 'Returning with error code 1 now...'
return 1
if args.verbose > 0:
print 'matchFile: ', matchFile
# Grab resultsFile name from command-line argument list....
# If it is not set, then extract the full path
# (resultsDir+resultsFile) from the paramFile...
resultsFile = args.resultsFile
if resultsFile == 'default':
# Extract the name of the resultsDir...
resultsDir = planDict['resultsDir']
if resultsDir.lower() == 'default':
resultsDir = os.getcwd()
# Extract the name of the matchFile...
resultsFile = planDict['resultsFile']
if resultsFile.lower() == 'default':
resultsFile = 'sdss_galex_transform_results.csv'
# Create the full path of the resultsFile
resultsFile = os.path.join(resultsDir,resultsFile)
# Create a full path base name for QA files based on the value of resultsFile...
qaFileBaseName = os.path.splitext(resultsFile)[0]
# Read in selected columns from file into Pandas dataframe:
columns = ['psfMag_u','psfMag_g','psfMag_r','psfMag_i','psfMag_z','FUV_MAG','NUV_MAG']
df = pd.read_csv(matchFile, usecols=columns)
df.head(10)
# Rename columns...
df.rename(columns={'psfMag_u':'u_sdss',
'psfMag_g':'g_sdss',
'psfMag_r':'r_sdss',
'psfMag_i':'i_sdss',
'psfMag_z':'z_sdss',
'FUV_MAG':'FUV_galex',
'NUV_MAG':'NUV_galex'
},inplace=True)
# Add color columns...
df.loc[:,'ug_sdss'] = df.loc[:,'u_sdss'] - df.loc[:,'g_sdss']
df.loc[:,'gr_sdss'] = df.loc[:,'g_sdss'] - df.loc[:,'r_sdss']
df.loc[:,'ri_sdss'] = df.loc[:,'r_sdss'] - df.loc[:,'i_sdss']
df.loc[:,'iz_sdss'] = df.loc[:,'i_sdss'] - df.loc[:,'z_sdss']
df.loc[:,'gi_sdss'] = df.loc[:,'g_sdss'] - df.loc[:,'i_sdss']
df.loc[:,'uNUV'] = df.loc[:,'u_sdss'] - df.loc[:,'NUV_galex']
df.loc[:,'NUVg'] = df.loc[:,'NUV_galex'] - df.loc[:,'g_sdss']
df.loc[:,'FUVNUV'] = df.loc[:,'FUV_galex'] - df.loc[:,'NUV_galex']
# Create initial (and generous) mask...
mask = ( ( df['ug_sdss'] > 0.5 ) & ( df['ug_sdss'] < 2.0 ) &
( df['gr_sdss'] > 0.2 ) & ( df['gr_sdss'] < 0.8 ) &
( df['gi_sdss'] > 0.3 ) & ( df['gi_sdss'] < 1.0 ) &
( df['uNUV'] > -10 ) & ( df['uNUV'] < 10.0 ) &
( df['NUVg'] > -10 ) & ( df['NUVg'] < 10.0 ) )
# Make a backup copy of original df...
df_orig = df.copy()
# Make a backup copy of original mask...
mask_orig = mask.copy()
# Open fit results output file...
try:
fout = open(resultsFile, 'w')
except IOError:
sys.exit('Unable to write to file ' + resultsFile)
# Write header to fit results output file...
hdr = createFitResultsHeaderOutputLine(norder)
fout.write(hdr+'\n')
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Fit (u_sdss-NUV) vs. (NUV-g_sdss)... #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
if args.verbose > 0:
print """# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #"""
print """# Fit (u_sdss-NUV) vs. (NUV-g_sdss) #"""
print """# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #"""
print
# Create names for use in QA plots...
dmagName = '$u_{sdss} - NUV_{galex}$'
colorName1 = '$(NUV_{galex} - g_{sdss})$'
# Grab the original version of df from the backup copy...
df = df_orig.copy()
# Grab the original version of mask from the backup copy...
mask = mask_orig.copy()
# Iterate, with sigma-clipping...
for i in range(niter):
iiter = i + 1
if args.verbose > 0:
print """ iter%d...""" % ( iiter )
# make a copy of original df, overwriting the old one...
df = df[mask].copy()
# Identify dmag and color1 series...
dmag = df.loc[:,'uNUV']
color1 = df.loc[:,'NUVg']
# Perform fit...
p,perr,rms = transformFit1(color1, dmag, norder, args.verbose)
df.loc[:,'res'] = residuals1(p, color1, dmag)
# Identify outliers...
stddev = df['res'].std()
mask = (np.abs(df.res)< nsigma*stddev)
outputLine = createFitResultsOutputLine(2, p, perr, rms, 'uNUV', 'NUVg')
fout.write(outputLine+'\n')
if args.verbose > 0:
print outputLine
print
# Create QA plots...
res = df.loc[:,'res']
outputFileName = """%s.%s.%s.qa1.png""" % (qaFileBaseName, 'uNUV', 'NUVg')
status = sdssGalexTransform1ColorQAPlots1(dmag, color1, res, norder, dmagName, colorName1, p, rms, outputFileName)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Fit (u_sdss-g_sdss) vs. (g_sdss-r_sdss)... #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
if args.verbose > 0:
print """# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #"""
print """# Fit (u_sdss-g_sdss) vs. (g_sdss-r_sdss) #"""
print """# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #"""
print
# Create names for use in QA plots...
dmagName = '$u_{sdss} - g_{sdss}$'
colorName1 = '$(g-r)_{sdss}$'
# Grab the original version of df from the backup copy...
df = df_orig.copy()
# Grab the original version of mask from the backup copy...
mask = mask_orig.copy()
# Iterate, with sigma-clipping...
for i in range(niter):
iiter = i + 1
if args.verbose > 0:
print """ iter%d...""" % ( iiter )
# make a copy of original df, overwriting the old one...
df = df[mask].copy()
# Identify dmag and color1 series...
dmag = df.loc[:,'ug_sdss']
color1 = df.loc[:,'gr_sdss']
# Perform fit...
p,perr,rms = transformFit1(color1, dmag, norder, args.verbose)
df.loc[:,'res'] = residuals1(p, color1, dmag)
# Identify outliers...
stddev = df['res'].std()
mask = (np.abs(df.res)< nsigma*stddev)
outputLine = createFitResultsOutputLine(2, p, perr, rms, 'ug_sdss', 'gr_sdss')
fout.write(outputLine+'\n')
if args.verbose > 0:
print outputLine
print
# Create QA plots...
res = df.loc[:,'res']
outputFileName = """%s.%s.%s.qa1.png""" % (qaFileBaseName, 'ug_sdss', 'gr_sdss')
status = sdssGalexTransform1ColorQAPlots1(dmag, color1, res, norder, dmagName, colorName1, p, rms, outputFileName)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Fit (u_sdss-g_sdss) vs. (g_sdss-i_sdss)... #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
if args.verbose > 0:
print """# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #"""
print """# Fit (u_sdss-g_sdss) vs. (g_sdss-i_sdss) #"""
print """# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #"""
print
# Create names for use in QA plots...
dmagName = '$u_{sdss} - g_{sdss}$'
colorName1 = '$(g-i)_{sdss}$'
# Grab the original version of df from the backup copy...
df = df_orig.copy()
# Grab the original version of mask from the backup copy...
mask = mask_orig.copy()
# Iterate, with sigma-clipping...
for i in range(niter):
iiter = i + 1
if args.verbose > 0:
print """ iter%d...""" % ( iiter )
# make a copy of original df, overwriting the old one...
df = df[mask].copy()
# Identify dmag and color1 series...
dmag = df.loc[:,'ug_sdss']
color1 = df.loc[:,'gi_sdss']
# Perform fit...
p,perr,rms = transformFit1(color1, dmag, norder, args.verbose)
df.loc[:,'res'] = residuals1(p, color1, dmag)
# Identify outliers...
stddev = df['res'].std()
mask = (np.abs(df.res)< nsigma*stddev)
outputLine = createFitResultsOutputLine(2, p, perr, rms, 'ug_sdss', 'gi_sdss')
fout.write(outputLine+'\n')
if args.verbose > 0:
print outputLine
print
# Create QA plots...
res = df.loc[:,'res']
outputFileName = """%s.%s.%s.qa1.png""" % (qaFileBaseName, 'ug_sdss', 'gi_sdss')
status = sdssGalexTransform1ColorQAPlots1(dmag, color1, res, norder, dmagName, colorName1, p, rms, outputFileName)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Fit (u_sdss-NUV) vs. (NUV-g_sdss) and (g_sdss-r_sdss)... #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
if args.verbose > 0:
print """# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #"""
print """# Fit (u_sdss-NUV) vs. (NUV-g_sdss) and (g_sdss-r_sdss)... #"""
print """# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #"""
print
# Create names for use in QA plots...
dmagName = '$u_{sdss} - NUV_{galex}$'
colorName1 = '$(NUV_{galex} - g_{sdss})$'
colorName2 = '$(g-r)_{sdss}$'
# Grab the original version of df from the backup copy...
df = df_orig.copy()
# Grab the original version of mask from the backup copy...
mask = mask_orig.copy()
# Iterate, with sigma-clipping...
for i in range(niter):
iiter = i + 1
if args.verbose > 0:
print """ iter%d...""" % ( iiter )
# make a copy of original df, overwriting the old one...
df = df[mask].copy()
# Identify dmag, color1, and color2 series...
dmag = df.loc[:,'uNUV']
color1 = df.loc[:,'NUVg']
color2 = df.loc[:,'gr_sdss']
# Perform fit...
p,perr,rms = transformFit2(color1, color2, dmag, norder, args.verbose)
df.loc[:,'res'] = residuals2(p, color1, color2, dmag)
# Identify outliers...
stddev = df['res'].std()
mask = (np.abs(df.res)< nsigma*stddev)
outputLine = createFitResultsOutputLine(norder, p, perr, rms, 'uNUV', 'NUVg', 'gr_sdss')
fout.write(outputLine+'\n')
if args.verbose > 0:
print outputLine
print
# Create QA plots...
res = df.loc[:,'res']
outputFileName = """%s.%s.%s.%s.qa1.png""" % (qaFileBaseName, 'uNUV', 'NUVg', 'gr_sdss')
status = sdssGalexTransform2ColorQAPlots1(dmag, color1, color2, res, norder, dmagName, colorName1, colorName2, p, rms, outputFileName)
outputFileName = """%s.%s.%s.%s.qa2.html""" % (qaFileBaseName, 'uNUV', 'NUVg', 'gr_sdss')
status = sdssGalexTransform2ColorQAPlots2(df, 'uNUV', 'NUVg', 'gr_sdss', 'res', dmagName, colorName1, colorName2, norder, p, rms, outputFileName)
outputFileName = """%s.%s.%s.%s.qa3_res.html""" % (qaFileBaseName, 'uNUV', 'NUVg', 'gr_sdss')
status = sdssGalexTransform2ColorQAPlots3(df, 'uNUV', 'NUVg', 'gr_sdss', 'res', dmagName, colorName1, colorName2, norder, p, rms, outputFileName)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Fit (u_sdss-NUV) vs. (NUV-g_sdss) and (g_sdss-i_sdss)... #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
if args.verbose > 0:
print """# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #"""
print """# Fit (u_sdss-NUV) vs. (NUV-g_sdss) and (g_sdss-i_sdss)... #"""
print """# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #"""
print
# Create names for use in QA plots...
dmagName = '$u_{sdss} - NUV_{galex}$'
colorName1 = '$(NUV_{galex} - g_{sdss})$'
colorName2 = '$(g-i)_{sdss}$'
# Grab the original version of df from the backup copy...
df = df_orig.copy()
# Grab the original version of mask from the backup copy...
mask = mask_orig.copy()
# Iterate, with sigma-clipping...
for i in range(niter):
iiter = i + 1
if args.verbose > 0:
print """ iter%d...""" % ( iiter )
# make a copy of original df, overwriting the old one...
df = df[mask].copy()
# Identify dmag, color1, and color2 series...
dmag = df.loc[:,'uNUV']
color1 = df.loc[:,'NUVg']
color2 = df.loc[:,'gi_sdss']
# Perform fit...
p,perr,rms = transformFit2(color1, color2, dmag, norder, args.verbose)
df.loc[:,'res'] = residuals2(p, color1, color2, dmag)
# Identify outliers...
stddev = df['res'].std()
mask = (np.abs(df.res)< nsigma*stddev)
outputLine = createFitResultsOutputLine(norder, p, perr, rms, 'uNUV', 'NUVg', 'gi_sdss')
fout.write(outputLine+'\n')
if args.verbose > 0:
print outputLine
print
# Create QA plots...
res = df.loc[:,'res']
outputFileName = """%s.%s.%s.%s.qa1.png""" % (qaFileBaseName, 'uNUV', 'NUVg', 'gi_sdss')
status = sdssGalexTransform2ColorQAPlots1(dmag, color1, color2, res, norder, dmagName, colorName1, colorName2, p, rms, outputFileName)
outputFileName = """%s.%s.%s.%s.qa2.html""" % (qaFileBaseName, 'uNUV', 'NUVg', 'gi_sdss')
status = sdssGalexTransform2ColorQAPlots2(df, 'uNUV', 'NUVg', 'gi_sdss', 'res', dmagName, colorName1, colorName2, norder, p, rms, outputFileName)
outputFileName = """%s.%s.%s.%s.qa3_res.html""" % (qaFileBaseName, 'uNUV', 'NUVg', 'gi_sdss')
status = sdssGalexTransform2ColorQAPlots3(df, 'uNUV', 'NUVg', 'gi_sdss', 'res', dmagName, colorName1, colorName2, norder, p, rms, outputFileName)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Fit (u_sdss-g_sdss) vs. (g_sdss-r_sdss) and (r_sdss-i_sdss)... #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
if args.verbose > 0:
print """# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #"""
print """# Fit (u_sdss-g_sdss) vs. (g_sdss-r_sdss) and (r_sdss-i_sdss)... #"""
print """# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #"""
print
# Create names for use in QA plots...
dmagName = '$u_{sdss} - g_{sdss}$'
colorName1 = '$(g-r)_{sdss}$'
colorName2 = '$(r-i)_{sdss}$'
# Grab the original version of df from the backup copy...
df = df_orig.copy()
# Grab the original version of mask from the backup copy...
mask = mask_orig.copy()
# Iterate, with sigma-clipping...
for i in range(niter):
iiter = i + 1
if args.verbose > 0:
print """ iter%d...""" % ( iiter )
# make a copy of original df, overwriting the old one...
df = df[mask].copy()
# Identify dmag, color1, and color2 series...
dmag = df.loc[:,'ug_sdss']
color1 = df.loc[:,'gr_sdss']
color2 = df.loc[:,'ri_sdss']
# Perform fit...
p,perr,rms = transformFit2(color1, color2, dmag, norder, args.verbose)
df.loc[:,'res'] = residuals2(p, color1, color2, dmag)
# Identify outliers...
stddev = df['res'].std()
mask = (np.abs(df.res)< nsigma*stddev)
outputLine = createFitResultsOutputLine(norder, p, perr, rms, 'ug_sdss', 'gr_sdss', 'ri_sdss')
fout.write(outputLine+'\n')
if args.verbose > 0:
print outputLine
print
# Create QA plots...
res = df.loc[:,'res']
outputFileName = """%s.%s.%s.%s.qa1.png""" % (qaFileBaseName, 'ug_sdss', 'gr_sdss', 'ri_sdss')
status = sdssGalexTransform2ColorQAPlots1(dmag, color1, color2, res, norder, dmagName, colorName1, colorName2, p, rms, outputFileName)
outputFileName = """%s.%s.%s.%s.qa2.html""" % (qaFileBaseName, 'ug_sdss', 'gr_sdss', 'ri_sdss')
status = sdssGalexTransform2ColorQAPlots2(df, 'ug_sdss', 'gr_sdss', 'ri_sdss', 'res', dmagName, colorName1, colorName2, norder, p, rms, outputFileName)
outputFileName = """%s.%s.%s.%s.qa3_res.html""" % (qaFileBaseName, 'ug_sdss', 'gr_sdss', 'ri_sdss')
status = sdssGalexTransform2ColorQAPlots3(df, 'ug_sdss', 'gr_sdss', 'ri_sdss', 'res', dmagName, colorName1, colorName2, norder, p, rms, outputFileName)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Fit (u_sdss-g_sdss) vs. (g_sdss-i_sdss) and (r_sdss-i_sdss)... #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
if args.verbose > 0:
print """# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #"""
print """# Fit (u_sdss-g_sdss) vs. (g_sdss-i_sdss) and (r_sdss-i_sdss)... #"""
print """# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #"""
print
# Create names for use in QA plots...
dmagName = '$u_{sdss} - g_{sdss}$'
colorName1 = '$(g-i)_{sdss}$'
colorName2 = '$(r-i)_{sdss}$'
# Grab the original version of df from the backup copy...
df = df_orig.copy()
# Grab the original version of mask from the backup copy...
mask = mask_orig.copy()
# Iterate, with sigma-clipping...
for i in range(niter):
iiter = i + 1
if args.verbose > 0:
print """ iter%d...""" % ( iiter )
# make a copy of original df, overwriting the old one...
df = df[mask].copy()
# Identify dmag, color1, and color2 series...
dmag = df.loc[:,'ug_sdss']
color1 = df.loc[:,'gi_sdss']
color2 = df.loc[:,'ri_sdss']
# Perform fit...
p,perr,rms = transformFit2(color1, color2, dmag, norder, args.verbose)
df.loc[:,'res'] = residuals2(p, color1, color2, dmag)
# Identify outliers...
stddev = df['res'].std()
mask = (np.abs(df.res)< nsigma*stddev)
outputLine = createFitResultsOutputLine(norder, p, perr, rms, 'ug_sdss', 'gi_sdss', 'ri_sdss')
fout.write(outputLine+'\n')
if args.verbose > 0:
print outputLine
print
# Create QA plots...
res = df.loc[:,'res']
outputFileName = """%s.%s.%s.%s.qa1.png""" % (qaFileBaseName, 'ug_sdss', 'gi_sdss', 'ri_sdss')
status = sdssGalexTransform2ColorQAPlots1(dmag, color1, color2, res, norder, dmagName, colorName1, colorName2, p, rms, outputFileName)
outputFileName = """%s.%s.%s.%s.qa2.html""" % (qaFileBaseName, 'ug_sdss', 'gi_sdss', 'ri_sdss')
status = sdssGalexTransform2ColorQAPlots2(df, 'ug_sdss', 'gi_sdss', 'ri_sdss', 'res', dmagName, colorName1, colorName2, norder, p, rms, outputFileName)
outputFileName = """%s.%s.%s.%s.qa3_res.html""" % (qaFileBaseName, 'ug_sdss', 'gi_sdss', 'ri_sdss')
status = sdssGalexTransform2ColorQAPlots3(df, 'ug_sdss', 'gi_sdss', 'ri_sdss', 'res', dmagName, colorName1, colorName2, norder, p, rms, outputFileName)
# Close outputFile...
fout.close()
return 0
##################################
#
# Define some functions for fitting dmag vs. color...
#
# These functions are based on a scripts found at
# http://linuxgazette.net/115/andreasen.html (by Anders Andreasen)
# and at
# http://www.phy.uct.ac.za/courses/python/examples/fitresonance.py (University of Cape Town)
##################################
# Parametric function:
# p is the parameter vector;
# For fp1, we assume a polynomial function in one color...
def fp1(p,color1_array):
#retValue = p[0] + p[1]*color1_array + p[2]*color1_array*color1_array
norder = p.size-1
retValue = p[0]
for i in range(norder):
retValue = retValue + p[i+1]*color1_array**(i+1)
return retValue
##################################
# Error function:
def residuals1(p,color1_array,dmag_array):
err = (dmag_array-fp1(p,color1_array))
return err
##################################
# Fitting code:
def transformFit1(color1_array, dmag_array, norder=2, verbose=0):
# Calculate the median of dmag for use as an initial guess
# for the overall zeropoint offset..
mdn = np.median( dmag_array, None )
# Parameter names
#pname = (['c_0', 'c_1', 'c_2'])
pname = []
for i in range(0,norder+1):
pname.append("""c_%d""" % i)
# Initial parameter values
#p0 = [mdn, 0.0, 0.0]
p0 = (1+norder)*[0.0]
p0[0] = mdn
if verbose > 0:
print
print 'Initial parameter values: ', p0
# Perform fit
p,cov,infodict,mesg,ier = leastsq(residuals1, p0,
args=(color1_array, dmag_array),
maxfev=10000, full_output=1)
if ( ier>=1 and ier <=4):
if verbose > 0: print "Converged"
else:
# Add an exception error or a non-zero return value?
print "Not converged"
print mesg
# Calculate some descriptors of the fit
# (similar to the output from gnuplot 2d fits)
chisq=sum(infodict['fvec']*infodict['fvec'])
dof=len(dmag_array)-len(p)
rms=math.sqrt(chisq/dof)
if verbose > 0:
print "Converged with chi squared ",chisq
print "degrees of freedom, dof ", dof
print "RMS of residuals (i.e. sqrt(chisq/dof)) ", rms
print "Reduced chisq (i.e. variance of residuals) ", chisq/dof
print
# uncertainties are calculated as per gnuplot, "fixing" the result
# for non unit values of the reduced chisq.
# values at min match gnuplot
perr = []
if verbose > 0:
print "Fitted parameters at minimum, with 68% C.I.:"
for i,pmin in enumerate(p):
if verbose > 0:
print "%-10s %13g +/- %13g (%5f percent)" % (pname[i],pmin,math.sqrt(cov[i,i])*math.sqrt(chisq/dof),
100.*math.sqrt(cov[i,i])*math.sqrt(chisq/dof)/abs(pmin))
perr.append(math.sqrt(cov[i,i])*math.sqrt(chisq/dof))
if verbose > 0: print
if verbose > 0:
print "Correlation matrix:"
# correlation matrix close to gnuplot
print " ",
for i in range(len(pname)): print "%-10s" % (pname[i],),
print
for i in range(len(p)):
print "%-10s" % pname[i],
for j in range(i+1):
print "%10f" % (cov[i,j]/math.sqrt(cov[i,i]*cov[j,j]),),
#endfor
print
#endfor
print
print
print
return p, perr, rms
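##################################
# Illustrative usage sketch for transformFit1 (not part of the fitting
# pipeline above; the coefficients and noise level are made up):
# recover dmag = c_0 + c_1*color + c_2*color^2 from synthetic data.
def transformFit1_example(norder=2, verbose=0):
    np.random.seed(42)
    color = np.random.uniform(0.2, 0.8, 1000)
    dmag = 1.0 + 0.5*color - 0.1*color**2 + np.random.normal(0., 0.02, color.size)
    # p should come back close to [1.0, 0.5, -0.1] with rms ~ 0.02
    p, perr, rms = transformFit1(color, dmag, norder, verbose)
    return p, perr, rms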
##################################
#
# Define some functions for fitting dmag vs. color1 and color2...
#
# These functions are based on a scripts found at
# http://linuxgazette.net/115/andreasen.html (by Anders Andreasen)
# and at
# http://www.phy.uct.ac.za/courses/python/examples/fitresonance.py (University of Cape Town)
##################################
# Parametric function:
# p is the parameter vector;
# For fp2, we assume a polynomial in each of the 2 colors
# but with no cross terms...
def fp2(p,color1_array,color2_array):
#retValue = p[0] + \
# p[1]*color1_array + p[2]*color1_array*color1_array + \
# p[3]*color2_array + p[4]*color2_array*color2_array
norder = (p.size-1)/2
retValue = p[0]
for i in range(norder):
retValue = retValue + p[i+1]*color1_array**(i+1)
retValue = retValue + p[i+norder+1]*color2_array**(i+1)
return retValue
##################################
# Error function:
def residuals2(p,color1_array,color2_array,dmag_array):
err = (dmag_array-fp2(p,color1_array,color2_array))
return err
##################################
# Fitting code:
def transformFit2(color1_array, color2_array, dmag_array, norder=2, verbose=0):
# Calculate the median of dmag for use as an initial guess
# for the overall zeropoint offset..
mdn = np.median( dmag_array, None )
# Parameter names
#pname = (['c_0', 'c_1', 'c_2', 'c_3', 'c_4'])
pname = []
for i in range(0,2*norder+1):
pname.append("""c_%d""" % i)
# Initial parameter values
#p0 = [mdn, 0.0, 0.0, 0.0, 0.0]
p0 = (1+2*norder)*[0.0]
p0[0] = mdn
if verbose > 0:
print
print 'Initial parameter values: ', p0
# Perform fit
p,cov,infodict,mesg,ier = leastsq(residuals2, p0,
args=(color1_array, color2_array, dmag_array),
maxfev=10000, full_output=1)
if ( ier>=1 and ier <=4):
if verbose > 0: print "Converged"
else:
# Add an exception error or a non-zero return value?
print "Not converged"
print mesg
# Calculate some descriptors of the fit
# (similar to the output from gnuplot 2d fits)
chisq=sum(infodict['fvec']*infodict['fvec'])
dof=len(dmag_array)-len(p)
rms=math.sqrt(chisq/dof)
if verbose > 0:
print "Converged with chi squared ",chisq
print "degrees of freedom, dof ", dof
print "RMS of residuals (i.e. sqrt(chisq/dof)) ", rms
print "Reduced chisq (i.e. variance of residuals) ", chisq/dof
print
# uncertainties are calculated as per gnuplot, "fixing" the result
# for non unit values of the reduced chisq.
# values at min match gnuplot
perr = []
if verbose > 0:
print "Fitted parameters at minimum, with 68% C.I.:"
for i,pmin in enumerate(p):
if verbose > 0:
print "%-10s %13g +/- %13g (%5f percent)" % (pname[i],pmin,math.sqrt(cov[i,i])*math.sqrt(chisq/dof),
100.*math.sqrt(cov[i,i])*math.sqrt(chisq/dof)/abs(pmin))
perr.append(math.sqrt(cov[i,i])*math.sqrt(chisq/dof))
if verbose > 0: print
if verbose > 0:
print "Correlation matrix:"
# correlation matrix close to gnuplot
print " ",
for i in range(len(pname)): print "%-10s" % (pname[i],),
print
for i in range(len(p)):
print "%-10s" % pname[i],
for j in range(i+1):
print "%10f" % (cov[i,j]/math.sqrt(cov[i,i]*cov[j,j]),),
#endfor
print
#endfor
print
print
print
return p, perr, rms
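##################################
# Illustrative usage sketch for transformFit2 (not part of the fitting
# pipeline above; coefficients are made up).  Note the parameter ordering
# imposed by fp2: p[1..norder] multiply powers of color1 and
# p[norder+1..2*norder] multiply powers of color2.
def transformFit2_example(norder=2, verbose=0):
    np.random.seed(42)
    color1 = np.random.uniform(-5.0, 5.0, 2000)
    color2 = np.random.uniform(0.2, 0.8, 2000)
    dmag = (1.0 + 0.30*color1 - 0.02*color1**2 +
            0.50*color2 - 0.10*color2**2 +
            np.random.normal(0., 0.02, color1.size))
    # p should come back close to [1.0, 0.30, -0.02, 0.50, -0.10]
    p, perr, rms = transformFit2(color1, color2, dmag, norder, verbose)
    return p, perr, rms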
##################################
def createFitResultsOutputLine(norder, p, perr, rms, dmag_name, color1_name, color2_name=''):
outputList = (2*(2*norder+1)+4)*[-9999.]
outputList[0] = dmag_name
outputList[1] = color1_name
outputList[2] = color2_name
for j in range(p.size):
outputList[2*j+3] = p[j]
outputList[2*j+4] = perr[j]
outputList[2*(2*norder+1)+3] = rms
outputLine = ','.join(map(str, outputList))
return outputLine
##################################
def createFitResultsHeaderOutputLine(norder):
outputList = (2*(2*norder+1)+4)*['c_']
outputList[0] = 'dmag_name'
outputList[1] = 'color1_name'
outputList[2] = 'color2_name'
for j in range(2*norder+1):
outputList[2*j+3] = ("""c_%d""" % j)
outputList[2*j+4] = ("""cerr_%d""" % j)
outputList[2*(2*norder+1)+3] = 'rms'
outputLine = ','.join(map(str, outputList))
return outputLine
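##################################
# For reference: with the default norder=2, createFitResultsHeaderOutputLine(2)
# yields a 14-column CSV header of the form (wrapped here for readability)
#
#   dmag_name,color1_name,color2_name,c_0,cerr_0,c_1,cerr_1,c_2,cerr_2,
#   c_3,cerr_3,c_4,cerr_4,rms
#
# and each createFitResultsOutputLine() call writes one matching row; for the
# one-color fits the unused c_3/cerr_3/c_4/cerr_4 slots are left at -9999.0.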
##################################
def sdssGalexTransform1ColorQAPlots1(dmag, color1, res, norder, dmagName, colorName1, p, rms, outputFileName):
# Prepare QA plots...
fig = plt.figure(figsize=(10,5))
fig.subplots_adjust(hspace=0.3)
#fig.suptitle("This is a supertitle!")
# We will exclude the lowest and highest 1% of color1, dmag,
# and the residuals when plotting the QA figures...
color1_desc = color1.describe(percentiles=[0.01, 0.99])
dmag_desc = dmag.describe(percentiles=[0.01, 0.99])
#res_desc = df.res.describe(percentiles=[0.01, 0.99])
res_desc = res.describe(percentiles=[0.01, 0.99])
color1_min = color1_desc['1%']
color1_max = color1_desc['99%']
dmag_min = dmag_desc['1%']
dmag_max = dmag_desc['99%']
res_min = res_desc['1%']
res_max = res_desc['99%']
# Plot 1: Descriptive text...
plt.subplot(231)
if norder == 1:
plot1Text = """%s = \n %.3f + \n %.3f*%s \n\n [rms: %.3f]""" % \
(dmagName, p[0], p[1], colorName1, rms)
elif norder == 2:
plot1Text = """%s = \n %.3f + \n %.3f*%s + \n %.3f*%s^2 \n\n [rms: %.3f]""" % \
(dmagName, p[0], p[1], colorName1, p[2], colorName1, rms)
else:
plot1Text = ''
plt.text(0.1,0.25,plot1Text)
plt.axis('off')
# Plot 2: 2D hexbin histogram of dmag vs. color1...
plt.subplot(232)
hb=plt.hexbin(color1, dmag, gridsize=100, cmap='inferno')
plt.axis([color1_min, color1_max, dmag_min, dmag_max])
plt.xlabel(colorName1)
plt.ylabel(dmagName)
cb = fig.colorbar(hb)
cb.set_label('Number')
plt.grid(color='white')
plt.grid(True)
# Plot 3: N/A
# Plot 4: 1d histogram of residuals...
plt.subplot(234)
#plt.hist(df.loc[:,'res'],bins=100)
plt.hist(res,bins=100)
plt.xlabel('residuals [mag]')
plt.ylabel('Number')
plt.grid(True)
plt.grid(color='black')
# Plot 5: 2d hexbin histogram of residuals vs. color1...
plt.subplot(235)
#hb = plt.hexbin(color1, df.loc[:,'res'], gridsize=100, cmap='inferno')
hb = plt.hexbin(color1, res, gridsize=100, cmap='inferno')
plt.axis([color1_min, color1_max, res_min, res_max])
plt.xlabel(colorName1)
plt.ylabel('residuals [mag]')
cb = plt.colorbar(hb)
cb.set_label('Number')
plt.grid(True)
plt.grid(color='white')
# Plot 6: N/A
# Plot...
plt.tight_layout()
#plt.show()
plt.savefig(outputFileName)
return 0
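# Example usage (sketch; the Series, labels, and file name are placeholders,
# and p/rms come from the corresponding 1-color fit):
#   sdssGalexTransform1ColorQAPlots1(df['dmag'], df['g_r'], res, 2,
#                                    'u-NUV', 'g-r', p, rms, 'qa_1color.png')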
##################################
def sdssGalexTransform2ColorQAPlots1(dmag, color1, color2, res, norder, dmagName, colorName1, colorName2, p, rms, outputFileName):
# Prepare QA plots...
fig = plt.figure(figsize=(10,5))
fig.subplots_adjust(hspace=0.3)
#fig.suptitle("This is a supertitle!")
# We will exclude the lowest and highest 1% of color1, color2,
# dmag, and the residuals when plotting the QA figures...
color1_desc = color1.describe(percentiles=[0.01, 0.99])
color2_desc = color2.describe(percentiles=[0.01, 0.99])
dmag_desc = dmag.describe(percentiles=[0.01, 0.99])
#res_desc = df.res.describe(percentiles=[0.01, 0.99])
res_desc = res.describe(percentiles=[0.01, 0.99])
color1_min = color1_desc['1%']
color1_max = color1_desc['99%']
color2_min = color2_desc['1%']
color2_max = color2_desc['99%']
dmag_min = dmag_desc['1%']
dmag_max = dmag_desc['99%']
res_min = res_desc['1%']
res_max = res_desc['99%']
# Plot 1: Descriptive text...
plt.subplot(231)
if norder == 1:
plot1Text = """%s = \n %.3f + \n %.3f*%s + \n %.3f*%s \n\n [rms: %.3f]""" % \
(dmagName, p[0], p[1], colorName1, p[2], colorName2, rms)
elif norder == 2:
plot1Text = """%s = \n %.3f + \n %.3f*%s + \n %.3f*%s^2 + \n %.3f*%s + \n %.3f*%s^2 \n\n [rms: %.3f]""" % \
(dmagName, p[0], p[1], colorName1, p[2], colorName1, p[3], colorName2, p[4], colorName2, rms)
else:
plot1Text = ''
plt.text(0.1,0.25,plot1Text)
plt.axis('off')
# Plot 2: 2D hexbin histogram of dmag vs. color1...
plt.subplot(232)
hb=plt.hexbin(color1, dmag, gridsize=100, cmap='inferno')
plt.axis([color1_min, color1_max, dmag_min, dmag_max])
plt.xlabel(colorName1)
plt.ylabel(dmagName)
cb = fig.colorbar(hb)
cb.set_label('Number')
plt.grid(color='white')
plt.grid(True)
# Plot 3: 2D hexbin histogram of dmag vs. color2...
plt.subplot(233)
hb=plt.hexbin(color2, dmag, gridsize=100, cmap='inferno')
plt.axis([color2_min, color2_max, dmag_min, dmag_max])
plt.xlabel(colorName2)
plt.ylabel(dmagName)
cb = plt.colorbar(hb)
cb.set_label('Number')
plt.grid(color='white')
plt.grid(True)
# Plot 4: 1d histogram of residuals...
plt.subplot(234)
#plt.hist(df.loc[:,'res'],bins=100)
plt.hist(res,bins=100)
plt.xlabel('residuals [mag]')
plt.ylabel('Number')
plt.grid(True)
plt.grid(color='black')
# Plot 5: 2d hexbin histogram of residuals vs. color1...
plt.subplot(235)
#hb = plt.hexbin(color1, df.loc[:,'res'], gridsize=100, cmap='inferno')
hb = plt.hexbin(color1, res, gridsize=100, cmap='inferno')
plt.axis([color1_min, color1_max, res_min, res_max])
plt.xlabel(colorName1)
plt.ylabel('residuals [mag]')
cb = plt.colorbar(hb)
cb.set_label('Number')
plt.grid(True)
plt.grid(color='white')
# Plot 6: 2d hexbin histogram of residuals vs. color2...
plt.subplot(236)
#hb = plt.hexbin(color2, df.loc[:,'res'], gridsize=100, cmap='inferno')
hb = plt.hexbin(color2, res, gridsize=100, cmap='inferno')
plt.axis([color2_min, color2_max, res_min, res_max])
plt.xlabel(colorName2)
plt.ylabel('residuals [mag]')
cb = plt.colorbar(hb)
cb.set_label('Number')
plt.grid(True)
plt.grid(color='white')
# Plot...
plt.tight_layout()
#plt.show()
plt.savefig(outputFileName)
return 0
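# Example usage (sketch; the Series, labels, and file name are placeholders):
#   sdssGalexTransform2ColorQAPlots1(df['dmag'], df['g_r'], df['r_i'], res, 2,
#                                    'u-NUV', 'g-r', 'r-i', p, rms,
#                                    'qa_2color.png')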
##################################
# Based on plotly code at http://inversionlabs.com/2016/03/21/best-fit-surface.html
def sdssGalexTransform2ColorQAPlots2(df, colName_dmag, colName_color1, colName_color2, colName_res, dmagName, colorName1, colorName2, norder, p, rms, outputFileName):
# An initial sanity check...
if norder > 2:
print 'sdssGalexTransform2ColorQAPlots2 cannot deal with norder > 2... skipping...'
return 1
# Create data from color1, color2, and dmag...
# If the sample size is larger than 1000,
# take a random sample of 1000 elements...
n_elements = df[colName_res].size
if n_elements <= 1000:
x = df.loc[:,colName_color1].values
y = df.loc[:,colName_color2].values
z = df.loc[:,colName_dmag].values
else:
df1000 = df.sample(n=1000,axis=0)
n_elements = df1000[colName_res].size
x = df1000.loc[:,colName_color1].values
y = df1000.loc[:,colName_color2].values
z = df1000.loc[:,colName_dmag].values
data = np.c_[x,y,z]
# Regular grid covering the domain of the data...
mn = np.min(data, axis=0)
mx = np.max(data, axis=0)
X,Y = np.meshgrid(np.linspace(mn[0], mx[0], 20), np.linspace(mn[1], mx[1], 20))
XX = X.flatten()
YY = Y.flatten()
# Evaluate it on grid...
if norder == 1:
Z = p[0] + p[1]*X + p[2]*Y
elif norder == 2:
Z = p[0] + p[1]*X + p[2]*X**2 + p[3]*Y + p[4]*Y**2
# Create data_fit from color1, color2, and the fit parameters p[*]...
x_fit = x
y_fit = y
if norder == 1:
z_fit = p[0] + p[1]*x_fit + p[2]*y_fit
elif norder == 2:
z_fit = p[0] + p[1]*x_fit + p[2]*x_fit*x_fit + p[3]*y_fit + p[4]*y_fit*y_fit
data_fit = np.c_[x_fit,y_fit,z_fit]
#delta_z = z - z_fit
# trace1 is the scatter plot of the original points...
trace1 = go.Scatter3d(
x=data[:,0],
y=data[:,1],
z=data[:,2],
#z=delta_z,
mode='markers',
marker=dict(size=1, color='red', line=dict(color='black', width=0.5), opacity=0.5)
)
# trace2 is the scatter plot of the fit values at the x,y positions of the original points...
trace2 = go.Scatter3d(
x=data_fit[:,0],
y=data_fit[:,1],
z=data_fit[:,2],
mode='markers',
marker=dict(size=2, color='yellow', line=dict(color='black', width=0.5), opacity=0.8)
)
# trace3 is the 2D surface of the fit equation...
trace3 = go.Surface(z=Z, x=X, y=Y, colorscale='RdBu', opacity=0.333)
# Package the trace dictionaries into a data object
data_go = go.Data([trace1, trace2, trace3])
# Dictionary of style options for all axes
axis = dict(
showbackground=True, # show axis background
backgroundcolor="rgb(204, 204, 204)", # set background color to grey
gridcolor="rgb(255, 255, 255)", # set grid line color
zerolinecolor="rgb(255, 255, 255)", # set zero grid line color
)
# Create a title...
if norder == 1:
titleText = """%s = %.3f + %.3f*%s + %.3f*%s [rms: %.3f]""" % \
(dmagName, p[0], p[1], colorName1, p[2], colorName2, rms)
elif norder == 2:
titleText = """%s = %.3f + %.3f*%s + %.3f*%s^2 + %.3f*%s + %.3f*%s^2\n[npts=%d, rms: %.3f]""" % \
(dmagName, p[0], p[1], colorName1, p[2], colorName1, p[3], colorName2, p[4], colorName2, n_elements, rms)
else:
titleText = ''
titleText = titleText.replace('$','')
# Make a layout object
layout = go.Layout(
title=titleText, # set plot title
scene=go.Scene( # axes are part of a 'scene' in 3d plots
xaxis=go.XAxis(axis), # set x-axis style
yaxis=go.YAxis(axis), # set y-axis style
zaxis=go.ZAxis(axis)), # set z-axis style
)
# Make a figure object
fig = go.Figure(data=data_go, layout=layout)
# Create interactive plot and save as javascript to html file...
#plotly.offline.iplot(fig, filename=outputFileName)
plotly.offline.plot(fig, filename=outputFileName, auto_open=False)
return 0
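# Example usage (sketch; column names and file name are placeholders).  The
# resulting interactive HTML shows the data points (red), the fitted values
# at the same colors (yellow), and the fitted surface itself:
#   sdssGalexTransform2ColorQAPlots2(df, 'dmag', 'g_r', 'r_i', 'res',
#                                    'u-NUV', 'g-r', 'r-i', 2, p, rms,
#                                    'qa_2color_surface.html')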
##################################
# Based on plotly code at http://inversionlabs.com/2016/03/21/best-fit-surface.html
def sdssGalexTransform2ColorQAPlots3(df, colName_dmag, colName_color1, colName_color2, colName_res, dmagName, colorName1, colorName2, norder, p, rms, outputFileName):
# An initial sanity check...
if norder > 2:
print 'sdssGalexTransform2ColorQAPlots3 cannot deal with norder > 2... skipping...'
return 1
# Create data from color1, color2, and res...
# If the sample size is larger than 1000,
# take a random sample of 1000 elements...
n_elements = df[colName_res].size
if n_elements <= 1000:
x = df.loc[:,colName_color1].values
y = df.loc[:,colName_color2].values
z = df.loc[:,colName_res].values
else:
df1000 = df.sample(n=1000,axis=0)
n_elements = df1000[colName_res].size
x = df1000.loc[:,colName_color1].values
y = df1000.loc[:,colName_color2].values
z = df1000.loc[:,colName_res].values
data = np.c_[x,y,z]
# Regular grid covering the domain of the data...
mn = np.min(data, axis=0)
mx = np.max(data, axis=0)
X,Y = np.meshgrid(np.linspace(mn[0], mx[0], 20), np.linspace(mn[1], mx[1], 20))
XX = X.flatten()
YY = Y.flatten()
# Evaluate it on grid...
Z = 0.00 + 0.00*X + 0.00*Y
# Create data_fit from color1, color2, and the fit parameters p[*]...
x_fit = x
y_fit = y
z_fit = 0.00 + 0.00*x_fit + 0.00*y_fit
data_fit = np.c_[x_fit,y_fit,z_fit]
# trace1 is the scatter plot of the original points...
trace1 = go.Scatter3d(
x=data[:,0],
y=data[:,1],
z=data[:,2],
mode='markers',
marker=dict(size=1, color='red', line=dict(color='black', width=0.5), opacity=0.5)
)
# trace2 is the scatter plot of the fit values at the x,y positions of the original points...
trace2 = go.Scatter3d(
x=data_fit[:,0],
y=data_fit[:,1],
z=data_fit[:,2],
mode='markers',
marker=dict(size=2, color='yellow', line=dict(color='black', width=0.5), opacity=0.8)
)
# trace3 is the 2D surface of the fit equation...
#trace3 = go.Surface(z=Z, x=X, y=Y, colorscale='RdBu', opacity=0.667)
trace3 = go.Surface(z=Z, x=X, y=Y, colorscale='Greys', opacity=0.667)
# Package the trace dictionaries into a data object
data_go = go.Data([trace1, trace2, trace3])
# Dictionary of style options for all axes
axis = dict(
showbackground=True, # show axis background
backgroundcolor="rgb(204, 204, 204)", # set background color to grey
gridcolor="rgb(255, 255, 255)", # set grid line color
zerolinecolor="rgb(255, 255, 255)", # set zero grid line color
)
# Create a title...
if norder == 1:
titleText = """%s = %.3f + %.3f*%s + %.3f*%s [rms: %.3f]""" % \
(dmagName, p[0], p[1], colorName1, p[2], colorName2, rms)
elif norder == 2:
titleText = """%s = %.3f + %.3f*%s + %.3f*%s^2 + %.3f*%s + %.3f*%s^2\n[npts=%d, rms: %.3f]""" % \
(dmagName, p[0], p[1], colorName1, p[2], colorName1, p[3], colorName2, p[4], colorName2, n_elements, rms)
else:
titleText = ''
titleText = titleText.replace('$','')
# Make a layout object
layout = go.Layout(
title=titleText, # set plot title
scene=go.Scene( # axes are part of a 'scene' in 3d plots
xaxis=go.XAxis(axis), # set x-axis style
yaxis=go.YAxis(axis), # set y-axis style
zaxis=go.ZAxis(axis)), # set z-axis style
)
# Make a figure object
fig = go.Figure(data=data_go, layout=layout)
# Create interactive plot and save as javascript to html file...
#plotly.offline.iplot(fig, filename=outputFileName)
plotly.offline.plot(fig, filename=outputFileName, auto_open=False)
return 0
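# Example usage (sketch; same placeholder arguments as for
# sdssGalexTransform2ColorQAPlots2, but here the residuals are plotted
# against a flat zero-valued reference plane rather than the fitted surface):
#   sdssGalexTransform2ColorQAPlots3(df, 'dmag', 'g_r', 'r_i', 'res',
#                                    'u-NUV', 'g-r', 'r-i', 2, p, rms,
#                                    'qa_2color_residuals.html')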
##################################
if __name__ == "__main__":
main()
##################################
| gpl-3.0 |
robin-lai/scikit-learn | examples/model_selection/plot_roc.py | 96 | 4487 | """
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
Multiclass settings
-------------------
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
Another evaluation measure for multi-class classification is
macro-averaging, which gives equal weight to the classification of each
label.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`example_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
##############################################################################
# Plot of a ROC curve for a specific class
plt.figure()
plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
##############################################################################
# Plot ROC curves for the multiclass problem
# Compute macro-average ROC curve and ROC area
fpr["macro"] = np.mean([fpr[i] for i in range(n_classes)], axis=0)
tpr["macro"] = np.mean([tpr[i] for i in range(n_classes)], axis=0)
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
linewidth=2)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
linewidth=2)
for i in range(n_classes):
plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |