repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (string, 15 classes)
---|---|---|---|---|---|
jayhetee/auto-sklearn | test/models/test_holdout_evaluator.py | 5 | 15614 | import copy
import unittest
import os
import shutil
import sys
import traceback
import numpy as np
from numpy.linalg import LinAlgError
import sklearn.datasets
from autosklearn.data.util import convert_to_bin
from autosklearn.data.competition_data_manager import CompetitionDataManager
from autosklearn.models.holdout_evaluator import HoldoutEvaluator
from autosklearn.models.paramsklearn import get_configuration_space
from ParamSklearn.util import get_dataset
from autosklearn.constants import *
N_TEST_RUNS = 10
class Dummy(object):
pass
class HoldoutEvaluator_Test(unittest.TestCase):
def test_evaluate_multiclass_classification(self):
X_train, Y_train, X_test, Y_test = get_dataset('iris')
X_valid = X_test[:25,]
Y_valid = Y_test[:25,]
X_test = X_test[25:,]
Y_test = Y_test[25:,]
D = Dummy()
D.info = {'metric': 'bac_metric', 'task': MULTICLASS_CLASSIFICATION,
'is_sparse': False, 'target_num': 3}
D.data = {'X_train': X_train, 'Y_train': Y_train,
'X_valid': X_valid, 'X_test': X_test}
D.feat_type = ['numerical', 'Numerical', 'numerical', 'numerical']
configuration_space = get_configuration_space(D.info,
include_estimators = ['ridge'],
include_preprocessors = ['select_rates'])
err = np.zeros([N_TEST_RUNS])
for i in range(N_TEST_RUNS):
print "Evaluate configuration: %d; result:" % i,
configuration = configuration_space.sample_configuration()
D_ = copy.deepcopy(D)
evaluator = HoldoutEvaluator(D_, configuration)
if not self._fit(evaluator):
print
continue
err[i] = evaluator.predict()
print err[i]
self.assertTrue(np.isfinite(err[i]))
self.assertGreaterEqual(err[i], 0.0)
print "Number of times it was worse than random guessing:" + str(np.sum(err > 1))
def test_evaluate_multiclass_classification_all_metrics(self):
X_train, Y_train, X_test, Y_test = get_dataset('iris')
X_valid = X_test[:25, ]
Y_valid = Y_test[:25, ]
X_test = X_test[25:, ]
Y_test = Y_test[25:, ]
D = Dummy()
D.info = {'metric': 'bac_metric', 'task': MULTICLASS_CLASSIFICATION,
'is_sparse': False, 'target_num': 3}
D.data = {'X_train': X_train, 'Y_train': Y_train,
'X_valid': X_valid, 'X_test': X_test}
D.feat_type = ['numerical', 'Numerical', 'numerical', 'numerical']
configuration_space = get_configuration_space(D.info,
include_estimators=['ridge'],
include_preprocessors=['select_rates'])
# Test all scoring functions
err = []
for i in range(N_TEST_RUNS):
print "Evaluate configuration: %d; result:" % i,
configuration = configuration_space.sample_configuration()
D_ = copy.deepcopy(D)
evaluator = HoldoutEvaluator(D_, configuration,
all_scoring_functions=True)
if not self._fit(evaluator):
print
continue
err.append(evaluator.predict())
print err[-1]
self.assertIsInstance(err[-1], dict)
for key in err[-1]:
self.assertEqual(len(err[-1]), 5)
self.assertTrue(np.isfinite(err[-1][key]))
self.assertGreaterEqual(err[-1][key], 0.0)
print "Number of times it was worse than random guessing:" + str(
np.sum(err > 1))
def test_evaluate_multilabel_classification(self):
X_train, Y_train, X_test, Y_test = get_dataset('iris')
Y_train = np.array(convert_to_bin(Y_train, 3))
Y_train[:,-1] = 1
Y_test = np.array(convert_to_bin(Y_test, 3))
Y_test[:, -1] = 1
X_valid = X_test[:25, ]
Y_valid = Y_test[:25, ]
X_test = X_test[25:, ]
Y_test = Y_test[25:, ]
D = Dummy()
D.info = {'metric': 'f1_metric', 'task': MULTILABEL_CLASSIFICATION,
'is_sparse': False, 'target_num': 3}
D.data = {'X_train': X_train, 'Y_train': Y_train,
'X_valid': X_valid, 'X_test': X_test}
D.feat_type = ['numerical', 'Numerical', 'numerical', 'numerical']
configuration_space = get_configuration_space(D.info,
include_estimators=['random_forest'],
include_preprocessors=['no_preprocessing'])
err = np.zeros([N_TEST_RUNS])
for i in range(N_TEST_RUNS):
print "Evaluate configuration: %d; result:" % i,
configuration = configuration_space.sample_configuration()
D_ = copy.deepcopy(D)
evaluator = HoldoutEvaluator(D_, configuration)
if not self._fit(evaluator):
print
continue
err[i] = evaluator.predict()
print err[i]
self.assertTrue(np.isfinite(err[i]))
self.assertGreaterEqual(err[i], 0.0)
print "Number of times it was worse than random guessing:" + str(
np.sum(err > 1))
def test_evaluate_binary_classification(self):
X_train, Y_train, X_test, Y_test = get_dataset('iris')
eliminate_class_two = Y_train != 2
X_train = X_train[eliminate_class_two]
Y_train = Y_train[eliminate_class_two]
eliminate_class_two = Y_test != 2
X_test = X_test[eliminate_class_two]
Y_test = Y_test[eliminate_class_two]
X_valid = X_test[:25, ]
Y_valid = Y_test[:25, ]
X_test = X_test[25:, ]
Y_test = Y_test[25:, ]
D = Dummy()
D.info = {'metric': 'auc_metric', 'task': BINARY_CLASSIFICATION,
'is_sparse': False, 'target_num': 2}
D.data = {'X_train': X_train, 'Y_train': Y_train,
'X_valid': X_valid, 'X_test': X_test}
D.feat_type = ['numerical', 'Numerical', 'numerical', 'numerical']
configuration_space = get_configuration_space(D.info,
include_estimators=['ridge'],
include_preprocessors=['select_rates'])
err = np.zeros([N_TEST_RUNS])
for i in range(N_TEST_RUNS):
print "Evaluate configuration: %d; result:" % i,
configuration = configuration_space.sample_configuration()
D_ = copy.deepcopy(D)
evaluator = HoldoutEvaluator(D_, configuration)
if not self._fit(evaluator):
print
continue
err[i] = evaluator.predict()
self.assertTrue(np.isfinite(err[i]))
print err[i]
self.assertGreaterEqual(err[i], 0.0)
print "Number of times it was worse than random guessing:" + str(
np.sum(err > 1))
def test_evaluate_regression(self):
X_train, Y_train, X_test, Y_test = get_dataset('boston')
X_valid = X_test[:200, ]
Y_valid = Y_test[:200, ]
X_test = X_test[200:, ]
Y_test = Y_test[200:, ]
D = Dummy()
D.info = {'metric': 'r2_metric', 'task': REGRESSION,
'is_sparse': False, 'target_num': 1}
D.data = {'X_train': X_train, 'Y_train': Y_train,
'X_valid': X_valid, 'X_test': X_test}
D.feat_type = ['numerical', 'Numerical', 'numerical', 'numerical',
'numerical', 'numerical', 'numerical', 'numerical',
'numerical', 'numerical', 'numerical']
configuration_space = get_configuration_space(D.info,
include_estimators=['random_forest'],
include_preprocessors=['no_preprocessing'])
err = np.zeros([N_TEST_RUNS])
for i in range(N_TEST_RUNS):
print "Evaluate configuration: %d; result:" % i,
configuration = configuration_space.sample_configuration()
D_ = copy.deepcopy(D)
evaluator = HoldoutEvaluator(D_, configuration)
if not self._fit(evaluator):
print
continue
err[i] = evaluator.predict()
self.assertTrue(np.isfinite(err[i]))
print err[i]
self.assertGreaterEqual(err[i], 0.0)
print "Number of times it was worse than random guessing:" + str(
np.sum(err > 1))
def test_with_abalone(self):
dataset = "abalone"
dataset_dir = os.path.join(os.path.dirname(__file__), ".datasets")
D = CompetitionDataManager(dataset, dataset_dir)
configuration_space = get_configuration_space(D.info,
include_estimators=['extra_trees'],
include_preprocessors=['no_preprocessing'])
errors = []
for i in range(N_TEST_RUNS):
configuration = configuration_space.sample_configuration()
D_ = copy.deepcopy(D)
evaluator = HoldoutEvaluator(D_, configuration)
if not self._fit(evaluator):
print
continue
err = evaluator.predict()
self.assertLess(err, 0.99)
self.assertTrue(np.isfinite(err))
errors.append(err)
# This is a reasonable bound
self.assertEqual(10, len(errors))
self.assertLess(min(errors), 0.77)
def test_5000_classes(self):
weights = ([0.0002] * 4750) + ([0.0001] * 250)
X, Y = sklearn.datasets.make_classification(n_samples=10000,
n_features=20,
n_classes=5000,
n_clusters_per_class=1,
n_informative=15,
n_redundant=5,
n_repeated=0,
weights=weights,
flip_y=0,
class_sep=1.0,
hypercube=True,
shift=None,
scale=1.0,
shuffle=True,
random_state=1)
self.assertEqual(250, np.sum(np.bincount(Y) == 1))
D = Dummy()
D.info = {'metric': 'r2_metric', 'task': MULTICLASS_CLASSIFICATION,
'is_sparse': False, 'target_num': 1}
D.data = {'X_train': X, 'Y_train': Y,
'X_valid': X, 'X_test': X}
D.feat_type = ['numerical'] * 5000
configuration_space = get_configuration_space(D.info,
include_estimators=['extra_trees'],
include_preprocessors=['no_preprocessing'])
configuration = configuration_space.sample_configuration()
D_ = copy.deepcopy(D)
evaluator = HoldoutEvaluator(D_, configuration)
evaluator.fit()
def _fit(self, evaluator):
"""Allow us to catch known and valid exceptions for all evaluate
scripts."""
try:
evaluator.fit()
return True
except ValueError as e:
if "Floating-point under-/overflow occurred at epoch" in e.message or \
"removed all features" in e.message or \
"failed to create intent" in e.message:
pass
else:
traceback.print_tb(sys.exc_info()[2])
raise e
except LinAlgError as e:
if "not positive definite, even with jitter" in e.message:
pass
else:
raise e
except AttributeError as e:
# Some error in QDA
if "log" == e.message:
pass
else:
raise e
except RuntimeWarning as e:
if "invalid value encountered in sqrt" in e.message:
pass
elif "divide by zero encountered in divide" in e.message:
pass
else:
raise e
except UserWarning as e:
if "FastICA did not converge" in e.message:
pass
else:
raise e
def test_file_output(self):
output_dir = os.path.join(os.getcwd(), ".test")
try:
shutil.rmtree(output_dir)
except:
pass
X_train, Y_train, X_test, Y_test = get_dataset('iris')
X_valid = X_test[:25, ]
Y_valid = Y_test[:25, ]
X_test = X_test[25:, ]
Y_test = Y_test[25:, ]
D = Dummy()
D.info = {'metric': 'bac_metric', 'task': MULTICLASS_CLASSIFICATION,
'is_sparse': False, 'target_num': 3}
D.data = {'X_train': X_train, 'Y_train': Y_train,
'X_valid': X_valid, 'X_test': X_test}
D.feat_type = ['numerical', 'Numerical', 'numerical', 'numerical']
D.basename = "test"
configuration_space = get_configuration_space(D.info)
while True:
configuration = configuration_space.sample_configuration()
evaluator = HoldoutEvaluator(D, configuration,
with_predictions=True,
all_scoring_functions=True,
output_dir=output_dir,
output_y_test=True)
if not self._fit(evaluator):
print
continue
evaluator.predict()
evaluator.file_output()
self.assertTrue(os.path.exists(os.path.join(output_dir,
"y_optimization.npy")))
break
def test_predict_proba_binary_classification(self):
X_train, Y_train, X_test, Y_test = get_dataset('iris')
eliminate_class_two = Y_train != 2
X_train = X_train[eliminate_class_two]
Y_train = Y_train[eliminate_class_two]
eliminate_class_two = Y_test != 2
X_test = X_test[eliminate_class_two]
Y_test = Y_test[eliminate_class_two]
X_valid = X_test[:25, ]
Y_valid = Y_test[:25, ]
X_test = X_test[25:, ]
Y_test = Y_test[25:, ]
class Dummy2(object):
def predict_proba(self, y, batch_size=200):
return np.array([[0.1, 0.9], [0.7, 0.3]])
model = Dummy2()
task_type = BINARY_CLASSIFICATION
D = Dummy()
D.info = {'metric': 'bac_metric', 'task': task_type,
'is_sparse': False, 'target_num': 3}
D.data = {'X_train': X_train, 'Y_train': Y_train,
'X_valid': X_valid, 'X_test': X_test}
D.feat_type = ['numerical', 'Numerical', 'numerical', 'numerical']
configuration_space = get_configuration_space(
D.info, include_estimators=['ridge'],
include_preprocessors=['select_rates'])
configuration = configuration_space.sample_configuration()
evaluator = HoldoutEvaluator(D, configuration)
pred = evaluator.predict_proba(None, model, task_type)
expected = [[0.9], [0.3]]
for i in range(len(expected)):
self.assertEqual(expected[i], pred[i])
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.test_evaluate']
unittest.main()
| bsd-3-clause |
perpetua1/django-pandas | runtests.py | 3 | 1374 | #!/usr/bin/env python
import os
import sys
import django
from django.conf import settings
if not settings.configured:
settings_dict = dict(
INSTALLED_APPS=(
'django.contrib.contenttypes',
'django_pandas',
'django_pandas.tests',
),
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
"USER": "",
"PASSWORD": "",
"HOST": "",
"PORT": "",
}
},
MIDDLEWARE_CLASSES = ()
)
settings.configure(**settings_dict)
if django.VERSION >= (1, 7):
django.setup()
def runtests(*test_args):
if not test_args:
test_args = ['django_pandas']
parent = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, parent)
if django.VERSION < (1, 8):
from django.test.simple import DjangoTestSuiteRunner
failures = DjangoTestSuiteRunner(
verbosity=1, interactive=True, failfast=False).run_tests(['tests'])
sys.exit(failures)
else:
from django.test.runner import DiscoverRunner
failures = DiscoverRunner(
verbosity=1, interactive=True, failfast=False).run_tests(test_args)
sys.exit(failures)
if __name__ == '__main__':
runtests()
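# Hedged usage note (added for illustration; not part of the original file): running
# this script directly executes the default 'django_pandas' test label, e.g.
#
#     python runtests.py
#
# Command-line arguments are not parsed here, so to run a different label import the
# module and call runtests('myapp.tests') from Python instead ('myapp.tests' is a
# hypothetical label).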
| bsd-3-clause |
sheltowt/PIXLEE-computer-vision-clustering | build/lib.linux-x86_64-2.7/PCV/geometry/warp.py | 12 | 5580 | import matplotlib.delaunay as md
from scipy import ndimage
from pylab import *
from numpy import *
from PCV.geometry import homography
def image_in_image(im1,im2,tp):
""" Put im1 in im2 with an affine transformation
such that corners are as close to tp as possible.
tp are homogeneous and counter-clockwise from top left. """
# points to warp from
m,n = im1.shape[:2]
fp = array([[0,m,m,0],[0,0,n,n],[1,1,1,1]])
# compute affine transform and apply
H = homography.Haffine_from_points(tp,fp)
im1_t = ndimage.affine_transform(im1,H[:2,:2],
(H[0,2],H[1,2]),im2.shape[:2])
alpha = (im1_t > 0)
return (1-alpha)*im2 + alpha*im1_t
def combine_images(im1,im2,alpha):
""" Blend two images with weights as in alpha. """
return (1-alpha)*im1 + alpha*im2
def alpha_for_triangle(points,m,n):
""" Creates alpha map of size (m,n)
for a triangle with corners defined by points
(given in normalized homogeneous coordinates). """
alpha = zeros((m,n))
for i in range(min(points[0]),max(points[0])):
for j in range(min(points[1]),max(points[1])):
x = linalg.solve(points,[i,j,1])
if min(x) > 0: #all coefficients positive
alpha[i,j] = 1
return alpha
def triangulate_points(x,y):
""" Delaunay triangulation of 2D points. """
centers,edges,tri,neighbors = md.delaunay(x,y)
return tri
def plot_mesh(x,y,tri):
""" Plot triangles. """
for t in tri:
t_ext = [t[0], t[1], t[2], t[0]] # add first point to end
plot(x[t_ext],y[t_ext],'r')
def pw_affine(fromim,toim,fp,tp,tri):
""" Warp triangular patches from an image.
fromim = image to warp
toim = destination image
fp = from points in hom. coordinates
tp = to points in hom. coordinates
tri = triangulation. """
im = toim.copy()
# check if image is grayscale or color
is_color = len(fromim.shape) == 3
# create image to warp to (needed if iterate colors)
im_t = zeros(im.shape, 'uint8')
for t in tri:
# compute affine transformation
H = homography.Haffine_from_points(tp[:,t],fp[:,t])
if is_color:
for col in range(fromim.shape[2]):
im_t[:,:,col] = ndimage.affine_transform(
fromim[:,:,col],H[:2,:2],(H[0,2],H[1,2]),im.shape[:2])
else:
im_t = ndimage.affine_transform(
fromim,H[:2,:2],(H[0,2],H[1,2]),im.shape[:2])
# alpha for triangle
alpha = alpha_for_triangle(tp[:,t],im.shape[0],im.shape[1])
# add triangle to image
im[alpha>0] = im_t[alpha>0]
return im
def panorama(H,fromim,toim,padding=2400,delta=2400):
""" Create horizontal panorama by blending two images
using a homography H (preferably estimated using RANSAC).
The result is an image with the same height as toim. 'padding'
specifies number of fill pixels and 'delta' additional translation. """
# check if images are grayscale or color
is_color = len(fromim.shape) == 3
# homography transformation for geometric_transform()
def transf(p):
p2 = dot(H,[p[0],p[1],1])
return (p2[0]/p2[2],p2[1]/p2[2])
if H[1,2]<0: # fromim is to the right
print 'warp - right'
# transform fromim
if is_color:
# pad the destination image with zeros to the right
toim_t = hstack((toim,zeros((toim.shape[0],padding,3))))
fromim_t = zeros((toim.shape[0],toim.shape[1]+padding,toim.shape[2]))
for col in range(3):
fromim_t[:,:,col] = ndimage.geometric_transform(fromim[:,:,col],
transf,(toim.shape[0],toim.shape[1]+padding))
else:
# pad the destination image with zeros to the right
toim_t = hstack((toim,zeros((toim.shape[0],padding))))
fromim_t = ndimage.geometric_transform(fromim,transf,
(toim.shape[0],toim.shape[1]+padding))
else:
print 'warp - left'
# add translation to compensate for padding to the left
H_delta = array([[1,0,0],[0,1,-delta],[0,0,1]])
H = dot(H,H_delta)
# transform fromim
if is_color:
# pad the destination image with zeros to the left
toim_t = hstack((zeros((toim.shape[0],padding,3)),toim))
fromim_t = zeros((toim.shape[0],toim.shape[1]+padding,toim.shape[2]))
for col in range(3):
fromim_t[:,:,col] = ndimage.geometric_transform(fromim[:,:,col],
transf,(toim.shape[0],toim.shape[1]+padding))
else:
# pad the destination image with zeros to the left
toim_t = hstack((zeros((toim.shape[0],padding)),toim))
fromim_t = ndimage.geometric_transform(fromim,
transf,(toim.shape[0],toim.shape[1]+padding))
# blend and return (put fromim above toim)
if is_color:
# all non black pixels
alpha = ((fromim_t[:,:,0] * fromim_t[:,:,1] * fromim_t[:,:,2] ) > 0)
for col in range(3):
toim_t[:,:,col] = fromim_t[:,:,col]*alpha + toim_t[:,:,col]*(1-alpha)
else:
alpha = (fromim_t > 0)
toim_t = fromim_t*alpha + toim_t*(1-alpha)
return toim_t
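# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Shows one way panorama() might be called on two overlapping grayscale images.
# The file names and the 3x3 homography below are placeholders; as the docstring
# notes, H would normally be estimated with RANSAC.
def _example_panorama():
    from PIL import Image
    im1 = array(Image.open('left.jpg').convert('L'))    # hypothetical input images
    im2 = array(Image.open('right.jpg').convert('L'))
    H = array([[1., 0., 0.], [0., 1., -240.], [0., 0., 1.]])  # placeholder homography
    # fromim (im1) is warped into toim's (im2's) frame and blended on the right side
    return panorama(H, im1, im2, padding=2400, delta=2400)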
| bsd-2-clause |
dgary50/eovsa | spectrogram_fit_bkp.py | 1 | 20986 | '''
Module for plotting EOVSA data as a spectrogram'''
#
# History:
# 2015-May-09 DG
# First written.
# 2015-Jun-04 DG
# Major enhancements to add Spectrogram() class and many methods for working
# with it.
# 2015-Jun-07 DG
# Additional enhancements, especially to do total-power fits with the Staehli
# function. Also added a version number.
# 2015-Jun-08 RG
# Additional enhancement, now finds a fit for the four parameters of the Staehli
# function.
# 2015-Jun-22 DG
# Changes to reflect new selection of science bands, which resulted in a lot
# of frequencies with all zero data. Just eliminate those from the arrays.
# Also updated the spectrum-fitting routines to return physical parameters
# f_pk, S_pk, low-f slope, high-f slope
# 2015-Jun-27 DG
# Changed the code to standardize on names, content, and order of indices
# of outputs for rd_miriad_tsys. Names ut_mjd, fghz, and tsys will be used,
# with units hopefully made obvious. Order of indices will be
# (nant/nbl, npol, nfreq, ntimes).
# This required some changes to several of the routines here.
# 2015-Jun-30 DG
# Added suptitle() to explore plot.
# 2015-Jul-03 DG
# Changes to plot_spectrogram() to handle case of plotting xdata (cross-correlation
# amplitude)
# 2016-May-01 DG
# Changed to call read_dbcalfac(), to read calibration data from SQL database.
# 2017-Aug-16 DG
# Fixed a problem in tpfit, when nans or infs were in the data.
#
__version__ = '0.2'
def log_sample(fghz, ut, tsys):
''' Resamples a spectrogram from an irregular sampling in linear frequency space
to a regular grid in log frequency space.
'''
from scipy import interpolate
import numpy as np
nf = len(fghz)
fghzl = np.logspace(np.log10(fghz[0]),np.log10(fghz[-1]),nf)
x, y = np.meshgrid(ut,fghzl)
fint = interpolate.interp2d(ut,fghz,tsys,kind='cubic')
out = fint(ut,fghzl)
return fghzl,out
def lin_sample(fghz, ut, tsys):
''' Resamples a spectrogram from an irregular sampling in linear frequency space
to a regular grid in linear frequency space.
'''
from scipy import interpolate
import numpy as np
nf = len(fghz)
fghzl = np.linspace(fghz[0],fghz[-1],nf)
x, y = np.meshgrid(ut,fghzl)
fint = interpolate.interp2d(ut,fghz,tsys)#,kind='cubic')
out = fint(ut,fghzl)
return fghzl,out
def plot_spectrogram(fghz, ut, tsys, ax=None, cbar=True, logsample=False, **kwargs):
''' Create a standard spectrogram plot for EOVSA data, using the axes supplied,
or create a new single-axis plot if ax is None. The intensities are log-scaled,
the xaxis is interpreted as time (ut can be timestamps or plot_date format)
kwargs:
dmin Clip data to this minimum value [sfu] (default = 10 sfu)
dmax Clip data to this maximum value [sfu] (default = tsys.max())
xlabel String to use as xlabel
(default is 'Time [UT on YYYY-MM-DD]' if ax is supplied--none otherwise)
ylabel String to use as ylabel
(default is 'Frequency [GHz]' if ax is supplied--none otherwise)
title String to use as plot title
(default is no title)
logsample Boolean. If True, resample the data on a logarithmic
frequency space, if False resample on a linear space,
if None, do no resampling in frequency
xdata Boolean. If True, label graph for cross-correlation. If False,
or omitted, label graph for Total Power
'''
import matplotlib.pylab as plt
import matplotlib.dates
import numpy as np
utd = ut.plot_date
datstr = ut[0].iso[:10]
if ax is None:
# No axes supplied, so create one (and assume labels are wanted)
f, ax = plt.subplots(1,1)
ax.set_xlabel('Time [UT on '+datstr+']')
ax.set_ylabel('Frequency [GHz]')
ax.set_title('EOVSA Total Power for '+datstr)
if 'xdata' in kwargs.keys():
if kwargs['xdata'] is True:
ax.set_title('EOVSA Summed Cross-Correlation Amplitude for '+datstr)
ax.xaxis.set_tick_params(width=1.5,size=10,which='both')
ax.yaxis.set_tick_params(width=1.5,size=10,which='both')
if logsample:
# Sample data onto a uniform logarithmic frequency space
fghzl, tsysl = log_sample(fghz, utd, tsys)
ax.set_yscale('log')
minorFormatter = plt.LogFormatter(base=10, labelOnlyBase=False)
ax.yaxis.set_minor_formatter(minorFormatter)
elif logsample is None:
# No sampling in frequency space
fghzl, tsysl = fghz, tsys
else:
# logsample is not True or None (presumably False), so resample
# data on a uniform linear frequency space
fghzl, tsysl = lin_sample(fghz, utd, tsys)
dmin = 1.
if 'dmin' in kwargs.keys():
if kwargs['dmin'] is not None:
dmin = kwargs['dmin']
dmax = tsys.max()
if 'dmax' in kwargs.keys():
if kwargs['dmax'] is not None:
dmax = kwargs['dmax']
# Take logarithm if TP, but not for Cross-correlation amplitude
data = np.log10(np.clip(tsysl,dmin,dmax))
if 'xdata' in kwargs.keys():
if kwargs['xdata'] is True:
data = np.clip(tsysl,dmin,dmax)
im = ax.imshow(data,origin='lower',extent=[utd[0],utd[-1],fghzl[0],fghzl[-1]],
aspect='auto',interpolation='nearest')
if cbar:
cbar_label = 'Log Flux Density [sfu]'
if 'xdata' in kwargs.keys():
if kwargs['xdata'] is True:
cbar_label = 'Amplitude [arb. units]'
plt.colorbar(im,ax=ax,label=cbar_label)
ax.xaxis_date()
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter("%H:%M:%S"))
# Set labels if requested
if 'xlabel' in kwargs.keys():
if kwargs['xlabel'] == 'auto':
ax.set_xlabel('Time [UT on '+datstr+']')
else:
ax.set_xlabel(kwargs['xlabel'])
if 'ylabel' in kwargs.keys():
if kwargs['ylabel'] == 'auto':
ax.set_ylabel('Frequency [GHz]')
else:
ax.set_ylabel(kwargs['ylabel'])
if 'title' in kwargs.keys():
ax.set_title(kwargs['title'])
return ax
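# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Shows one way plot_spectrogram() might be called.  The arrays below are synthetic;
# `ut` is assumed to be a Time array (the function relies on ut.plot_date and ut[0].iso)
# and `tsys` has shape (nfreq, ntimes) in sfu.
def _example_plot_spectrogram():
    import numpy as np
    from util import Time
    fghz = np.linspace(2.5, 18., 100)                        # frequencies [GHz]
    ut = Time(np.linspace(57000.00, 57000.01, 200), format='mjd')
    tsys = np.abs(np.random.randn(100, 200)) * 50. + 1.      # fake flux densities [sfu]
    return plot_spectrogram(fghz, ut, tsys, logsample=True, dmin=1.,
                            xlabel='auto', ylabel='auto',
                            title='Synthetic EOVSA spectrogram')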
import numpy as np
import dump_tsys
from util import Time, common_val_idx
import offline
class Spectrogram():
def __init__(self,trange):
''' Create the object for the timerange specified by the
2-element Time() trange. The timerange is used to create a list
of Miriad database files to read, and the data are read.
'''
# Read data, assuming 16-element correlator if after 2016 May 1
if trange[0].lv > 3544905600.0:
out = dump_tsys.rd_miriad_tsys_16(trange)
else:
out = dump_tsys.rd_miriad_tsys(trange)
nant, npol, nf, nt = out['tsys'].shape
self.xdata = out['tsys'][:,0,:,:]
self.ydata = out['tsys'][:,1,:,:]
self.fghz = out['fghz']
self.time = Time(out['ut_mjd'],format='mjd')
self.tidx = [0,len(self.time)]
# Read calibration
fghz, self.calfac, self.offsun = offline.read_dbcalfac(trange[0])
# Make sure frequencies in data and calibration agree
calidx, dataidx = common_val_idx((fghz*1000).astype(np.int),(self.fghz*1000).astype(np.int))
self.bidx = [0,100]
self.fghz = self.fghz[dataidx]
self.xdata = self.xdata[:,dataidx,:]
self.ydata = self.ydata[:,dataidx,:]
if self.calfac is not None:
# Select frequencies and swap axes to put into standard form
self.calfac = np.rollaxis(self.calfac[:,calidx,:],2)
self.offsun = np.rollaxis(self.offsun[:,calidx,:],2)
fghz = fghz[calidx]
# Set frequency index range (fidx) to default to show only frequencies > 2.5 GHz
lowf, = np.where(self.fghz > 2.5)
self.fidx = [lowf[0],len(self.fghz)]
self.drange = [None,None]
self.antlist = range(nant)
self.cbar = True
self.showants = range(nant)
self.domedian = True
self.docal = True
self.dolog = False
self.dosub = True
self.ax = None
self.version = __version__
def show(self):
''' Create a spectrogram plot of the data
'''
tsys, stdtsys = self.get_data()
if self.domedian:
# This results in only a single plot
self.ax = plot_spectrogram(self.fghz[self.fidx[0]:self.fidx[1]], self.time[self.tidx[0]:self.tidx[1]], tsys, ax=self.ax, cbar=self.cbar, logsample=self.dolog, dmin=self.drange[0], dmax=self.drange[1])
else:
print 'Cannot (yet) plot data for each antenna separately. Please set <self>.domedian = True first'
def get_median_data(self, xtsys=None, ytsys=None):
''' Get optionally calibrated, optionally background subtracted
data as median over polarization and antenna list in self.showants
'''
if xtsys is None:
if self.docal:
# Do calibration and optionally subtraction
xtsys, ytsys = self.get_cal_data()
if self.dosub:
xtsys, ytsys = self.get_bgsub_data(xtsys, ytsys)
elif self.dosub:
# No calibration, so select data and do subtraction
xtsys = self.xdata[self.antlist,self.fidx[0]:self.fidx[1],self.tidx[0]:self.tidx[1]]
ytsys = self.ydata[self.antlist,self.fidx[0]:self.fidx[1],self.tidx[0]:self.tidx[1]]
xtsys, ytsys = self.get_bgsub_data(xtsys, ytsys)
medxtsys = np.nanmedian(xtsys[self.showants,:,:],0)
stdxtsys = np.nanstd(xtsys[self.showants,:,:],0)
medytsys = np.nanmedian(ytsys[self.showants,:,:],0)
stdytsys = np.nanstd(ytsys[self.showants,:,:],0)
tsys = (medxtsys+medytsys)/2.
stdtsys = np.sqrt(stdxtsys**2 + stdytsys**2)/2.
return tsys, stdtsys
def get_cal_data(self):
# Select data
xtsys = np.zeros((len(self.antlist), self.fidx[1] - self.fidx[0], self.tidx[1] - self.tidx[0]),dtype='float')
ytsys = np.zeros((len(self.antlist), self.fidx[1] - self.fidx[0], self.tidx[1] - self.tidx[0]),dtype='float')
# Apply calibration
for i,j in enumerate(range(self.tidx[0], self.tidx[1])):
xtsys[:,:,i] = ((self.xdata[self.antlist, self.fidx[0]:self.fidx[1], j] -
self.offsun[self.antlist, 0, self.fidx[0]:self.fidx[1]])
*self.calfac[self.antlist, 0, self.fidx[0]:self.fidx[1]])
ytsys[:,:,i] = ((self.ydata[self.antlist, self.fidx[0]:self.fidx[1], j] -
self.offsun[self.antlist, 1, self.fidx[0]:self.fidx[1]])
*self.calfac[self.antlist, 1, self.fidx[0]:self.fidx[1]])
return xtsys, ytsys
def get_bgsub_data(self,xtsys=None, ytsys=None):
''' Get optionally calibrated data after background subtraction is applied.
'''
if xtsys is None:
if self.docal:
# Do calibration and optionally subtraction
xtsys, ytsys = self.get_cal_data()
if self.dosub:
xtsys, ytsys = self.get_bgsub_data(xtsys, ytsys)
else:
# No calibration, so select data and raw subtraction
xtsys = self.xdata[self.antlist,self.fidx[0]:self.fidx[1],self.tidx[0]:self.tidx[1]]
ytsys = self.ydata[self.antlist,self.fidx[0]:self.fidx[1],self.tidx[0]:self.tidx[1]]
# Perform the background subtraction
bgx, bgy = self.getbg(self.bidx, xtsys, ytsys)
nt = self.tidx[1] - self.tidx[0]
nf = self.fidx[1] - self.fidx[0]
for i in range(nt):
xtsys[:,0:nf,i] -= bgx
ytsys[:,0:nf,i] -= bgy
return xtsys, ytsys
def get_data(self):
''' Get optionally calibrated, optionally background-subtracted data.
If self.domedian is True, return the median of data over polarization
and antenna list in self.showants.
'''
if self.docal:
xtsys, ytsys = self.get_cal_data()
else:
xtsys = self.xdata[self.antlist,self.fidx[0]:self.fidx[1],self.tidx[0]:self.tidx[1]]
ytsys = self.ydata[self.antlist,self.fidx[0]:self.fidx[1],self.tidx[0]:self.tidx[1]]
if self.dosub:
xtsys, ytsys = self.get_bgsub_data(xtsys, ytsys)
if self.domedian:
tsys, stdtsys = self.get_median_data(xtsys, ytsys)
else:
tsys = np.swapaxes(np.array([xtsys, ytsys]),1,0)
stdtsys = None
return tsys, stdtsys
def getbg(self, bidx=None, xtsys=None, ytsys=None):
''' Get background spectra for each antenna and polarization, applying
calibration first if indicated by self.docal = True
'''
if bidx is None:
bidx = self.bidx
else:
self.bidx = bidx
if xtsys is None:
# No data supplied, so generate it
if self.docal:
# Do calibration
xtsys, ytsys = self.get_cal_data()
else:
# No calibration desired, so just select raw data
xtsys = self.xdata[self.antlist,self.fidx[0]:self.fidx[1],self.tidx[0]:self.tidx[1]]
ytsys = self.ydata[self.antlist,self.fidx[0]:self.fidx[1],self.tidx[0]:self.tidx[1]]
# Generate median over supplied background indexes. These are spectra for each antenna in self.antlist
bgx = np.nanmedian(xtsys[:,:,bidx[0]:bidx[1]],2)
bgy = np.nanmedian(ytsys[:,:,bidx[0]:bidx[1]],2)
return bgx, bgy
def explore(self):
''' Like show(), but provides a mouse-driven interface for exploring the plot
after creation. Only works for median data.
'''
import matplotlib.pylab as plt
tsys, stdtsys = self.get_median_data()
dlogtsys = stdtsys/tsys
fig = plt.figure(figsize=(8,6))
fig.suptitle('EOVSA Spectrogram '+self.time[self.tidx[0]].iso[:10],fontsize=25)
spectrogram_ax = plt.axes([0.1,0.4,0.6,0.5])
spectrogram_ax.set_ylabel('Frequency [GHz]')
spectrum_ax = plt.axes([0.75,0.4,0.23,0.5])
spectrum_ax.set_ylim(1.,tsys.max())
try:
spectrum_ax.set_yscale('log')
except:
pass
spectrum_ax.set_xscale('log')
spectrum_ax.set_xlim(1,18)
spectrum_ax.set_ylabel('Flux Density [sfu]')
spectrum_ax.set_xlabel('Frequency [GHz]')
# Set initial spectrum and lightcurve to correspond to mid-range
# time and frequency
fghz = self.fghz[self.fidx[0]:self.fidx[1]]
t = self.time[self.tidx[0]:self.tidx[1]].plot_date
midf = len(fghz)/2
midt = len(t)/2
spec, = spectrum_ax.plot(fghz,tsys[:,midt],'.')
p, ffit, sfit = tpfit(np.log(fghz),np.log(tsys[:,midt]),sigma=dlogtsys[:,midt])
specfit, = spectrum_ax.plot(np.exp(ffit),np.exp(sfit))
specpt, = spectrum_ax.plot(fghz[midf],tsys[midf,midt],'<',markersize=5,c='y')
if self.cbar:
lc_ax = plt.axes([0.1,0.1,0.48,0.25],sharex=spectrogram_ax)
else:
lc_ax = plt.axes([0.1,0.1,0.6,0.25],sharex=spectrogram_ax)
lc, = lc_ax.plot_date(t,tsys[midf,:],'-')
lcpt, = lc_ax.plot_date(t[midt],tsys[midf,midt],'^',markersize=5,c='y')
lc_ax.set_ylim(1.,tsys.max())
try:
lc_ax.set_yscale('log')
except:
pass
lc_ax.set_ylabel('Flux Density [sfu]')
lc_ax.set_xlabel('Time [UT]')
tstr = Time(t[midt],format='plot_date').iso[11:19]
lctxt = lc_ax.text(0.02,0.9,'{:} UT, {:0.3f} GHz, {:0.3f} sfu'.format(tstr,fghz[midf],tsys[midf,midt]), transform=lc_ax.transAxes)
sptxt = spectrum_ax.text(0.02,0.9,'{:} UT, {:0.3f} GHz, {:0.3f} sfu'.format(tstr,fghz[midf],tsys[midf,midt]), transform=spectrum_ax.transAxes)
self.ax = spectrogram_ax
self.show()
def find_ij(x, y):
return abs(utd - x).argmin(), abs(fghz - y).argmin()
def onmove(event):
if event.inaxes != spectrogram_ax: return
i, j = abs(t - event.xdata).argmin(), abs(fghz - event.ydata).argmin()
#print 'indexes are t=%f, f=%f'%(i, j)
spec.set_data(fghz,tsys[:,i])
p, ffit, sfit = tpfit(np.log(fghz),np.log(tsys[:,i]),sigma=dlogtsys[:,i])
specfit.set_data(np.exp(ffit),np.exp(sfit))
specpt.set_data(fghz[j],tsys[j,i])
lc.set_data(t,tsys[j,:])
lcpt.set_data(t[i],tsys[j,i])
tstr = Time(t[i],format='plot_date').iso[11:19]
lctxt.set_text('{:} UT, {:0.3f} GHz, {:0.3f} sfu'.format(tstr,fghz[j],tsys[j,i]))
sptxt.set_text('{:} UT, {:0.3f} GHz, {:0.3f} sfu'.format(tstr,fghz[j],tsys[j,i]))
fig.canvas.draw()
def onclick(event):
if event.inaxes != spectrogram_ax: return
# print 'Turning on mouse-move events'
fig.mid = fig.canvas.mpl_connect('motion_notify_event', onmove)
def onrelease(event):
if event.inaxes != spectrogram_ax: return
# print 'Turning off mouse-move events'
fig.canvas.mpl_disconnect(fig.mid)
self.cid = fig.canvas.mpl_connect('button_press_event', onclick)
self.rid = fig.canvas.mpl_connect('button_release_event', onrelease)
def get_fit(self):
''' Returns an array (in this order) of peak frequency, peak flux density,
low-frequency index (slope), and high-frequency index (slope) for each
time in self.tidx. This is only valid for calibrated, background-
subtracted burst emission.
'''
tsys, stdtsys = self.get_median_data()
nf, nt = tsys.shape
self.pfit = np.zeros((4,nt))
dlogtsys = stdtsys/tsys
for i in range(nt):
p, ffit, sfit = tpfit(np.log(self.fghz[self.fidx[0]:self.fidx[1]]),np.log(tsys[:,i]),sigma=dlogtsys[:,i])
# Calculate and return physical parameters S_pk, f_pk, alpha and beta
# Note that if the fit does not work, S_pk and f_pk are determined from the
# data, not the fit, and the slopes are nan.
S_pk = np.exp(sfit).max()
f_pk = np.exp(ffit[sfit.argmax()])
lf_slope = p[1]
hf_slope = p[1]-p[3]
self.pfit[:,i] = [f_pk, S_pk, lf_slope, hf_slope]
def peval(x, a, b, c, d):
''' Function called by curve_fit() to evaluate fitting function.
'''
# Original Staehli expression S = exp(A)*f^alpha*[1-exp(-exp(B)*f^(-beta))]
# can be written log S = A + alpha*log(f) + log(1-exp(-exp(B-beta*log(f))))
# Hence, in the expression below:
# x = log(f) [base-e]
# a = A
# b = alpha
# c = B
# d = beta
# Physical parameters are:
# Low-frequency slope = b
# High-frequency slope = alpha - beta = b - d
# Analytical expressions for S_pk and f_pk are not available, so
# the peak frequency and flux density are determined from the
# fit array
return a+b*x+np.log(1-np.exp(-np.exp(c-d*x)))
def tpfit(X,data,sigma=None):
''' Given a set of log(frequencies) X and total power log(flux density) at each
frequency, fit the data with the Staehli function and return the fit parameters and
x,y arrays of smooth, fitted data '''
from scipy.optimize import curve_fit
# This complains if there are nans or infs in the data, so remove them first
good = np.isfinite(data)
X = X[good]
data = data[good]
if sigma is not None:
sigma = sigma[good]
if len(data) < 4:
return np.zeros(4),np.array([0,1]),np.array([0,1])
y = data
x = np.linspace(0,2.9,20000)
try:
params, pcov = curve_fit(peval, X, y, sigma=sigma)
a, b, c, d = params
y = peval(x, a, b, c, d)
except:
params = [np.nan]*4
return params, x, y
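# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Builds a synthetic log-spectrum from peval() with known parameters and recovers them
# with tpfit(); the parameter values and noise level are illustrative only.
def _example_tpfit():
    logf = np.log(np.linspace(2.5, 18., 30))           # log-frequencies [GHz]
    a, b, c, d = 2.0, 2.5, 3.0, 4.0                    # "true" Staehli parameters
    logs = peval(logf, a, b, c, d) + np.random.normal(0., 0.01, logf.size)
    params, ffit, sfit = tpfit(logf, logs)
    f_pk = np.exp(ffit[sfit.argmax()])                 # peak frequency, as in get_fit()
    S_pk = np.exp(sfit).max()                          # peak flux density
    return params, f_pk, S_pk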
| gpl-2.0 |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/mpl_toolkits/mplot3d/axis3d.py | 9 | 17055 | #!/usr/bin/python
# axis3d.py, original mplot3d version by John Porter
# Created: 23 Sep 2005
# Parts rewritten by Reinier Heeres <[email protected]>
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import math
import copy
from matplotlib import lines as mlines, axis as maxis, \
patches as mpatches
from . import art3d
from . import proj3d
import numpy as np
def get_flip_min_max(coord, index, mins, maxs):
if coord[index] == mins[index]:
return maxs[index]
else:
return mins[index]
def move_from_center(coord, centers, deltas, axmask=(True, True, True)):
'''Return a coordinate that is moved by "deltas" away from the center.'''
coord = copy.copy(coord)
#print coord, centers, deltas, axmask
for i in range(3):
if not axmask[i]:
continue
if coord[i] < centers[i]:
coord[i] -= deltas[i]
else:
coord[i] += deltas[i]
return coord
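# Worked example (comment added for illustration): with coord=(0, 0, 0),
# centers=(1, 1, 1), deltas=(0.1, 0.1, 0.1) and the default axmask, every component
# is below its center, so each one is pushed further away, giving (-0.1, -0.1, -0.1).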
def tick_update_position(tick, tickxs, tickys, labelpos):
'''Update tick line and label position and style.'''
for (label, on) in ((tick.label1, tick.label1On), \
(tick.label2, tick.label2On)):
if on:
label.set_position(labelpos)
tick.tick1On, tick.tick2On = True, False
tick.tick1line.set_linestyle('-')
tick.tick1line.set_marker('')
tick.tick1line.set_data(tickxs, tickys)
tick.gridline.set_data(0, 0)
class Axis(maxis.XAxis):
# These points from the unit cube make up the x, y and z-planes
_PLANES = (
(0, 3, 7, 4), (1, 2, 6, 5), # yz planes
(0, 1, 5, 4), (3, 2, 6, 7), # xz planes
(0, 1, 2, 3), (4, 5, 6, 7), # xy planes
)
# Some properties for the axes
_AXINFO = {
'x': {'i': 0, 'tickdir': 1, 'juggled': (1, 0, 2),
'color': (0.95, 0.95, 0.95, 0.5)},
'y': {'i': 1, 'tickdir': 0, 'juggled': (0, 1, 2),
'color': (0.90, 0.90, 0.90, 0.5)},
'z': {'i': 2, 'tickdir': 0, 'juggled': (0, 2, 1),
'color': (0.925, 0.925, 0.925, 0.5)},
}
def __init__(self, adir, v_intervalx, d_intervalx, axes, *args, **kwargs):
# adir identifies which axes this is
self.adir = adir
# data and viewing intervals for this direction
self.d_interval = d_intervalx
self.v_interval = v_intervalx
# This is a temporary member variable.
# Do not depend on this existing in future releases!
self._axinfo = self._AXINFO[adir].copy()
self._axinfo.update({'label' : {'space_factor': 1.6,
'va': 'center',
'ha': 'center'},
'tick' : {'inward_factor': 0.2,
'outward_factor': 0.1},
'ticklabel': {'space_factor': 0.7},
'axisline': {'linewidth': 0.75,
'color': (0, 0, 0, 1)},
'grid' : {'color': (0.9, 0.9, 0.9, 1),
'linewidth': 1.0},
})
maxis.XAxis.__init__(self, axes, *args, **kwargs)
self.set_rotate_label(kwargs.get('rotate_label', None))
def init3d(self):
self.line = mlines.Line2D(xdata=(0, 0), ydata=(0, 0),
linewidth=self._axinfo['axisline']['linewidth'],
color=self._axinfo['axisline']['color'],
antialiased=True,
)
# Store dummy data in Polygon object
self.pane = mpatches.Polygon(np.array([[0,0], [0,1], [1,0], [0,0]]),
closed=False,
alpha=0.8,
facecolor=(1,1,1,0),
edgecolor=(1,1,1,0))
self.set_pane_color(self._axinfo['color'])
self.axes._set_artist_props(self.line)
self.axes._set_artist_props(self.pane)
self.gridlines = art3d.Line3DCollection([], )
self.axes._set_artist_props(self.gridlines)
self.axes._set_artist_props(self.label)
self.axes._set_artist_props(self.offsetText)
# Need to be able to place the label at the correct location
self.label._transform = self.axes.transData
self.offsetText._transform = self.axes.transData
def get_tick_positions(self):
majorLocs = self.major.locator()
self.major.formatter.set_locs(majorLocs)
majorLabels = [self.major.formatter(val, i) for i, val in enumerate(majorLocs)]
return majorLabels, majorLocs
def get_major_ticks(self, numticks=None):
ticks = maxis.XAxis.get_major_ticks(self, numticks)
for t in ticks:
t.tick1line.set_transform(self.axes.transData)
t.tick2line.set_transform(self.axes.transData)
t.gridline.set_transform(self.axes.transData)
t.label1.set_transform(self.axes.transData)
t.label2.set_transform(self.axes.transData)
return ticks
def set_pane_pos(self, xys):
xys = np.asarray(xys)
xys = xys[:,:2]
self.pane.xy = xys
def set_pane_color(self, color):
'''Set pane color to a RGBA tuple'''
self._axinfo['color'] = color
self.pane.set_edgecolor(color)
self.pane.set_facecolor(color)
self.pane.set_alpha(color[-1])
def set_rotate_label(self, val):
'''
Whether to rotate the axis label: True, False or None.
If set to None the label will be rotated if longer than 4 chars.
'''
self._rotate_label = val
def get_rotate_label(self, text):
if self._rotate_label is not None:
return self._rotate_label
else:
return len(text) > 4
def _get_coord_info(self, renderer):
minx, maxx, miny, maxy, minz, maxz = self.axes.get_w_lims()
if minx > maxx:
minx, maxx = maxx, minx
if miny > maxy:
miny, maxy = maxy, miny
if minz > maxz:
minz, maxz = maxz, minz
mins = np.array((minx, miny, minz))
maxs = np.array((maxx, maxy, maxz))
centers = (maxs + mins) / 2.
deltas = (maxs - mins) / 12.
mins = mins - deltas / 4.
maxs = maxs + deltas / 4.
vals = mins[0], maxs[0], mins[1], maxs[1], mins[2], maxs[2]
tc = self.axes.tunit_cube(vals, renderer.M)
avgz = [tc[p1][2] + tc[p2][2] + tc[p3][2] + tc[p4][2] for \
p1, p2, p3, p4 in self._PLANES]
highs = np.array([avgz[2*i] < avgz[2*i+1] for i in range(3)])
return mins, maxs, centers, deltas, tc, highs
def draw_pane(self, renderer):
renderer.open_group('pane3d')
mins, maxs, centers, deltas, tc, highs = self._get_coord_info(renderer)
info = self._axinfo
index = info['i']
if not highs[index]:
plane = self._PLANES[2 * index]
else:
plane = self._PLANES[2 * index + 1]
xys = [tc[p] for p in plane]
self.set_pane_pos(xys)
self.pane.draw(renderer)
renderer.close_group('pane3d')
def draw(self, renderer):
self.label._transform = self.axes.transData
renderer.open_group('axis3d')
# code from XAxis
majorTicks = self.get_major_ticks()
majorLocs = self.major.locator()
info = self._axinfo
index = info['i']
# filter locations here so that no extra grid lines are drawn
locmin, locmax = self.get_view_interval()
if locmin > locmax:
locmin, locmax = locmax, locmin
# Rudimentary clipping
majorLocs = [loc for loc in majorLocs if
locmin <= loc <= locmax]
self.major.formatter.set_locs(majorLocs)
majorLabels = [self.major.formatter(val, i)
for i, val in enumerate(majorLocs)]
mins, maxs, centers, deltas, tc, highs = self._get_coord_info(renderer)
# Determine grid lines
minmax = np.where(highs, maxs, mins)
# Draw main axis line
juggled = info['juggled']
edgep1 = minmax.copy()
edgep1[juggled[0]] = get_flip_min_max(edgep1, juggled[0], mins, maxs)
edgep2 = edgep1.copy()
edgep2[juggled[1]] = get_flip_min_max(edgep2, juggled[1], mins, maxs)
pep = proj3d.proj_trans_points([edgep1, edgep2], renderer.M)
centpt = proj3d.proj_transform(centers[0], centers[1], centers[2], renderer.M)
self.line.set_data((pep[0][0], pep[0][1]), (pep[1][0], pep[1][1]))
self.line.draw(renderer)
# Grid points where the planes meet
xyz0 = []
for val in majorLocs:
coord = minmax.copy()
coord[index] = val
xyz0.append(coord)
# Draw labels
peparray = np.asanyarray(pep)
# The transAxes transform is used because the Text object
# rotates the text relative to the display coordinate system.
# Therefore, if we want the labels to remain parallel to the
# axis regardless of the aspect ratio, we need to convert the
# edge points of the plane to display coordinates and calculate
# an angle from that.
# TODO: Maybe Text objects should handle this themselves?
dx, dy = (self.axes.transAxes.transform(peparray[0:2, 1]) -
self.axes.transAxes.transform(peparray[0:2, 0]))
lxyz = 0.5*(edgep1 + edgep2)
labeldeltas = info['label']['space_factor'] * deltas
axmask = [True, True, True]
axmask[index] = False
lxyz = move_from_center(lxyz, centers, labeldeltas, axmask)
tlx, tly, tlz = proj3d.proj_transform(lxyz[0], lxyz[1], lxyz[2], \
renderer.M)
self.label.set_position((tlx, tly))
if self.get_rotate_label(self.label.get_text()):
angle = art3d.norm_text_angle(math.degrees(math.atan2(dy, dx)))
self.label.set_rotation(angle)
self.label.set_va(info['label']['va'])
self.label.set_ha(info['label']['ha'])
self.label.draw(renderer)
# Draw Offset text
# Which of the two edge points do we want to
# use for locating the offset text?
if juggled[2] == 2 :
outeredgep = edgep1
outerindex = 0
else :
outeredgep = edgep2
outerindex = 1
pos = copy.copy(outeredgep)
pos = move_from_center(pos, centers, labeldeltas, axmask)
olx, oly, olz = proj3d.proj_transform(pos[0], pos[1], pos[2], renderer.M)
self.offsetText.set_text( self.major.formatter.get_offset() )
self.offsetText.set_position( (olx, oly) )
angle = art3d.norm_text_angle(math.degrees(math.atan2(dy, dx)))
self.offsetText.set_rotation(angle)
# Must set rotation mode to "anchor" so that
# the alignment point is used as the "fulcrum" for rotation.
self.offsetText.set_rotation_mode('anchor')
#-----------------------------------------------------------------------
# Note: the following logic determines the proper alignment of
# the offset text. This was determined entirely by trial-and-error
# and should not be in any way considered as "the way". There are
# still some edge cases where alignment is not quite right, but
# this seems to be more of a geometry issue (in other words, I
# might be using the wrong reference points).
#
# (TT, FF, TF, FT) are the shorthand for the tuple of
# (centpt[info['tickdir']] <= peparray[info['tickdir'], outerindex],
# centpt[index] <= peparray[index, outerindex])
#
# Three-letters (e.g., TFT, FTT) are short-hand for the array
# of bools from the variable 'highs'.
# ---------------------------------------------------------------------
if centpt[info['tickdir']] > peparray[info['tickdir'], outerindex] :
# if FT and if highs has an even number of Trues
if (centpt[index] <= peparray[index, outerindex]
and ((len(highs.nonzero()[0]) % 2) == 0)) :
# Usually, this means align right, except for the FTT case,
# in which offset for axis 1 and 2 are aligned left.
if highs.tolist() == [False, True, True] and index in (1, 2) :
align = 'left'
else :
align = 'right'
else :
# The FF case
align = 'left'
else :
# if TF and if highs has an even number of Trues
if (centpt[index] > peparray[index, outerindex]
and ((len(highs.nonzero()[0]) % 2) == 0)) :
# Usually means align left, except if it is axis 2
if index == 2 :
align = 'right'
else :
align = 'left'
else :
# The TT case
align = 'right'
self.offsetText.set_va('center')
self.offsetText.set_ha(align)
self.offsetText.draw(renderer)
# Draw grid lines
if len(xyz0) > 0:
# Grid points at end of one plane
xyz1 = copy.deepcopy(xyz0)
newindex = (index + 1) % 3
newval = get_flip_min_max(xyz1[0], newindex, mins, maxs)
for i in range(len(majorLocs)):
xyz1[i][newindex] = newval
# Grid points at end of the other plane
xyz2 = copy.deepcopy(xyz0)
newindex = (index + 2) % 3
newval = get_flip_min_max(xyz2[0], newindex, mins, maxs)
for i in range(len(majorLocs)):
xyz2[i][newindex] = newval
lines = list(zip(xyz1, xyz0, xyz2))
if self.axes._draw_grid:
self.gridlines.set_segments(lines)
self.gridlines.set_color([info['grid']['color']] * len(lines))
self.gridlines.draw(renderer, project=True)
# Draw ticks
tickdir = info['tickdir']
tickdelta = deltas[tickdir]
if highs[tickdir]:
ticksign = 1
else:
ticksign = -1
for tick, loc, label in zip(majorTicks, majorLocs, majorLabels):
if tick is None:
continue
# Get tick line positions
pos = copy.copy(edgep1)
pos[index] = loc
pos[tickdir] = edgep1[tickdir] + info['tick']['outward_factor'] * \
ticksign * tickdelta
x1, y1, z1 = proj3d.proj_transform(pos[0], pos[1], pos[2], \
renderer.M)
pos[tickdir] = edgep1[tickdir] - info['tick']['inward_factor'] * \
ticksign * tickdelta
x2, y2, z2 = proj3d.proj_transform(pos[0], pos[1], pos[2], \
renderer.M)
# Get position of label
labeldeltas = [info['ticklabel']['space_factor'] * x for
x in deltas]
axmask = [True, True, True]
axmask[index] = False
pos[tickdir] = edgep1[tickdir]
pos = move_from_center(pos, centers, labeldeltas, axmask)
lx, ly, lz = proj3d.proj_transform(pos[0], pos[1], pos[2], \
renderer.M)
tick_update_position(tick, (x1, x2), (y1, y2), (lx, ly))
tick.set_label1(label)
tick.set_label2(label)
tick.draw(renderer)
renderer.close_group('axis3d')
def get_view_interval(self):
"""return the Interval instance for this 3d axis view limits"""
return self.v_interval
def set_view_interval(self, vmin, vmax, ignore=False):
if ignore:
self.v_interval = vmin, vmax
else:
Vmin, Vmax = self.get_view_interval()
self.v_interval = min(vmin, Vmin), max(vmax, Vmax)
# TODO: Get this to work properly when mplot3d supports
# the transforms framework.
def get_tightbbox(self, renderer) :
# Currently returns None so that Axis.get_tightbbox
# doesn't return junk info.
return None
# Use classes to look at different data limits
class XAxis(Axis):
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.xy_dataLim.intervalx
class YAxis(Axis):
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.xy_dataLim.intervaly
class ZAxis(Axis):
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.zz_dataLim.intervalx
| gpl-2.0 |
lucabaldini/ximpol | ximpol/examples/crab_complex_mdp.py | 1 | 9671 | #!/usr/bin/env python
#
# Copyright (C) 2016, the ximpol team.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import numpy
from ximpol import XIMPOL_CONFIG, XIMPOL_DATA, XIMPOL_DOC
from ximpol.utils.logging_ import logger
from ximpol.core.pipeline import xPipeline
from ximpol.evt.binning import xBinnedModulationCube, xEventBinningBase
from ximpol.evt.event import xEventFile
from ximpol.utils.matplotlib_ import pyplot as plt
from ximpol.utils.matplotlib_ import save_current_figure
from ximpol.config.crab_pulsar import pol_degree_spline, pol_angle_spline,\
pl_index_spline, pl_normalization_spline
"""Script-wide simulation and analysis settings.
"""
CFG_FILE_PATH = os.path.join(XIMPOL_CONFIG, 'crab_complex.py')
OUT_FILE_PATH_BASE = os.path.join(XIMPOL_DATA, 'crab_complex')
#PULSAR_CFG_FILE_PATH = os.path.join(XIMPOL_CONFIG, 'crab_pulsar.py')
#PULSAR_OUT_FILE_PATH_BASE = os.path.join(XIMPOL_DATA, 'crab_pulsar')
#NEBULA_CFG_FILE_PATH = os.path.join(XIMPOL_CONFIG, 'crab_nebula.py')
#NEBULA_OUT_FILE_PATH_BASE = os.path.join(XIMPOL_DATA, 'crab_nebula')
EVT_FILE_PATH = '%s.fits' % OUT_FILE_PATH_BASE
#NEBULA_EVT_FILE_PATH = '%s.fits' % NEBULA_OUT_FILE_PATH_BASE
NEBULA_SELECTED_FILE_PATH = '%s_nebula_selected.fits' % OUT_FILE_PATH_BASE
NEBULA_MCUBE_FILE_PATH = '%s_nebula_mcube.fits' % OUT_FILE_PATH_BASE
SIM_DURATION = 10000
PHASE_BINS = [(0.,0.05), (0.05,0.25), (0.25, 0.45), (0.45, 0.9), (0.9,1.0)]
#NUM_PHASE_BINS = 25
#PHASE_BINS = numpy.linspace(0., 1., NUM_PHASE_BINS)
E_BINNING = [1.0, 3., 5., 8., 10.]
#E_BINNING = numpy.linspace(1., 10., 5)
OUTPUT_FOLDER = '/data/work/ximpol/ximpol/examples/'
MDP_OUTPUT_FILE = os.path.join(OUTPUT_FOLDER,'MDP_CrabPulsar_imaging_fE.txt')
"""Main pipeline object.
"""
PIPELINE = xPipeline(clobber=False)
def _sel_file_path(i):
"""Return the path to the i-th xpselct output file.
"""
return '%s_phase%04d.fits' % (OUT_FILE_PATH_BASE, i)
def _mcube_file_path(i):
"""Return the path to the i-th xpbin MCUBE output file.
"""
return '%s_phase%04d_mcube.fits' % (OUT_FILE_PATH_BASE, i)
def generate():
"""Generate the events.
"""
PIPELINE.xpobssim(configfile=CFG_FILE_PATH, duration=SIM_DURATION,
outfile=EVT_FILE_PATH)
def prepare_pulsar():
"""Prepare the event data for the actual analysis.
"""
for i, (_min, _max) in enumerate(PHASE_BINS):
#zip(PHASE_BINS[:-1],PHASE_BINS[1:])):
PIPELINE.xpselect(EVT_FILE_PATH, phasemin=_min,
phasemax=_max, mcsrcid=1,rad=0.25,
outfile=_sel_file_path(i))
PIPELINE.xpbin(_sel_file_path(i), algorithm='MCUBE', ebinalg='LIST',
ebinning=E_BINNING, outfile=_mcube_file_path(i))
def prepare_nebula():
"""Prepare the event data for the actual analysis.
"""
PIPELINE.xpselect(EVT_FILE_PATH, mcsrcid=0, rad=0.25, \
outfile=NEBULA_SELECTED_FILE_PATH)
PIPELINE.xpbin(NEBULA_SELECTED_FILE_PATH, algorithm='MCUBE', ebinalg='LIST',
ebinning=E_BINNING, outfile=NEBULA_MCUBE_FILE_PATH)
def calcMDP(plot=False):
mdp_file = open(MDP_OUTPUT_FILE,'w')
mdp_file.write('#Simulation time %s sec \n'%SIM_DURATION)
mdp_file.write('#Phase Ave delta phase Mean energy delta energy mdp99\n')
nebula_mcube_file = xBinnedModulationCube(NEBULA_MCUBE_FILE_PATH)
nebula_counts = nebula_mcube_file.counts
nebula_mdp = nebula_mcube_file.mdp99
txt = "Pulsar phase\t Emin - Emax\t Pulsar counts\t Nebula counts\t MDP\n"
MDP99_PULSAR = []
phase = []
phase_err = []
for i, (_min, _max) in enumerate(PHASE_BINS):
#zip(PHASE_BINS[:-1],PHASE_BINS[1:])):
pulse_diff = numpy.fabs(_max -_min)
_phase_ave = 0.5*(_min + _max)
phase.append(_phase_ave)
_phase_err = 0.5*(_max - _min)
phase_err.append(_phase_err)
pulsar_phase_mcube_file = xBinnedModulationCube(_mcube_file_path(i))
pulsar_emean = pulsar_phase_mcube_file.emean
for j, _energy_mean in enumerate(pulsar_emean):
pulsar_emin = pulsar_phase_mcube_file.emin[j]
pulsar_emax = pulsar_phase_mcube_file.emax[j]
pulsar_e_err = 0.5*(pulsar_emax-pulsar_emin)
pulsar_phase_counts = pulsar_phase_mcube_file.counts[j]
pulsar_mdp = pulsar_phase_mcube_file.mdp99[j]
#scale the nebula counts for the time used for the pulsar phase
scaled_nebula_counts = pulse_diff*nebula_counts[j]
count_sqrt = numpy.sqrt(pulsar_phase_counts + scaled_nebula_counts)
eff_mu_pulsar = pulsar_phase_mcube_file.effective_mu[j]
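# Descriptive comment (added): minimum detectable polarization at 99% confidence,
# MDP99 = 4.292 / (eff_mu * N_src) * sqrt(N_src + N_bkg), with N_src the pulsar
# counts in this phase bin and N_bkg the nebula counts scaled to the phase fraction.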
mdp = 4.292/eff_mu_pulsar*count_sqrt/pulsar_phase_counts
MDP99_PULSAR.append(100*mdp)
_data = (_phase_ave, _phase_err, _energy_mean, pulsar_e_err, pulsar_e_err, mdp)
_fmt = ('%.4e ' * len(_data)).strip()
_fmt = '%s\n' % _fmt
_line = _fmt % _data
mdp_file.write(_line)
#txt += "%s\t %s - %s\t %s\t %s\t %.3f\n"%(PHASE_BINS[i], pulsar_emin[0], pulsar_emax[0], pulsar_phase_counts[0], scaled_nebula_counts[0], 100*mdp)
MDP99_PULSAR = numpy.array(MDP99_PULSAR)
PHASE = numpy.array(phase)
mdp_file.close()
if plot:
scale_factor = 10
sim_label = 'XIPE %s ks' % (SIM_DURATION*scale_factor/1000.)
lc_label = 'Light curve'
plt.errorbar(PHASE, MDP99_PULSAR*(1/numpy.sqrt(10)),xerr=phase_err, label=sim_label,fmt='o')
pl_normalization_spline.plot(scale=10., show=False,
color='lightgray',label=lc_label)
plt.ylabel('MDP 99\%')
plt.legend()
plt.savefig('crab_complex_mdp_nonimaging_%i.png'%(SIM_DURATION*scale_factor/1000.))
#plt.show()
print txt
def makeMDPComparisonPlot(imaging_file_path):
non_imaging_file_path = imaging_file_path.replace('_imaging.txt','_non_imaging.txt')
print "Using %s for non imaging file path"%non_imaging_file_path
_phase_ave, _phase_err, mdp_imaging = numpy.loadtxt(imaging_file_path, unpack=True)
_phase_ave, _phase_err, mdp_nonimaging = numpy.loadtxt(non_imaging_file_path, unpack=True)
scale_factor = 10.
print "Improvement with imaging"
for i, phase in enumerate(_phase_ave):
imaging = 100*mdp_imaging[i]*(1/numpy.sqrt(scale_factor))
non_imaging = 100*mdp_nonimaging[i]*(1/numpy.sqrt(scale_factor))
print "%s\t Imaging (non):%s (%s) %s"%(phase,imaging,non_imaging,non_imaging/imaging)
sim_label_imaging = 'XIPE %s ks\n Imaging 15"' % (SIM_DURATION*scale_factor/1000.)
sim_label_nonimaging = 'Non imaging'
lc_label = 'Light curve'
plt.errorbar(_phase_ave, 100*mdp_imaging*(1/numpy.sqrt(scale_factor)),xerr=_phase_err, label=sim_label_imaging,fmt='o',markersize=6)
plt.errorbar(_phase_ave, 100*mdp_nonimaging*(1/numpy.sqrt(scale_factor)),xerr=_phase_err, label=sim_label_nonimaging,fmt='v',markersize=6)
pl_normalization_spline.plot(scale=10., show=False,
color='darkgray',label=lc_label)
#on_phase = 0.25, 0.45
#off_phase = 0.45,0.9
plt.axvspan(0.25, 0.45, color='r', alpha=0.45, lw=0)
plt.axvspan(0.45, 0.9, color='gray', alpha=0.25, lw=0)
plt.ylabel('MDP 99\%')
plt.legend()
plt.savefig('crab_complex_mdp_imaging_vs_nonimaging_%i_shaded.png'%(SIM_DURATION*scale_factor/1000.))
plt.show()
def makeMDP_fE_ComparisonPlot(file_path):
scale_factor = 10.
(_phase_ave, _phase_err, _energy_mean, pulsar_e_err, pulsar_e_err, mdp) =\
numpy.loadtxt(file_path, unpack=True)
print "Phase ave:",_phase_ave
print
print "Energy mean", _energy_mean
#phase_values = [0.025, 0.15, 0.35, 0.675, 0.95]
phase_values = [0.35,0.675]
on, on_phase_color = (0.35,'r')
off, off_phase_color = (0.675,'gray')
#for phase in phase_values:
plt.errorbar(_energy_mean[_phase_ave==on], 100*mdp[_phase_ave==on]*(1/numpy.sqrt(scale_factor)),xerr=pulsar_e_err[_phase_ave==on], label='On Phase',fmt='o',markersize=6,ls='--',color=on_phase_color)
plt.errorbar(_energy_mean[_phase_ave==off], 100*mdp[_phase_ave==off]*(1/numpy.sqrt(scale_factor)),xerr=pulsar_e_err[_phase_ave==off], label='Off Phase',fmt='o',markersize=6,ls='--',color=off_phase_color)
plt.legend()
plt.ylabel('MDP 99\%')
plt.xlabel('Energy (keV)')
plt.savefig('crab_complex_mdp_imaging_fE_%i.png'%(SIM_DURATION*scale_factor/1000.))
plt.show()
if __name__=='__main__':
#generate()
#prepare_pulsar()
#prepare_nebula()
#calcMDP(plot=False)
makeMDPComparisonPlot('MDP_CrabPulsar_imaging.txt')
makeMDP_fE_ComparisonPlot('MDP_CrabPulsar_imaging_fE.txt')
| gpl-3.0 |
mikebenfield/scipy | scipy/interpolate/_cubic.py | 37 | 29281 | """Interpolation algorithms using piecewise cubic polynomials."""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy._lib.six import string_types
from . import BPoly, PPoly
from .polyint import _isscalar
from scipy._lib._util import _asarray_validated
from scipy.linalg import solve_banded, solve
__all__ = ["PchipInterpolator", "pchip_interpolate", "pchip",
"Akima1DInterpolator", "CubicSpline"]
class PchipInterpolator(BPoly):
r"""PCHIP 1-d monotonic cubic interpolation.
`x` and `y` are arrays of values used to approximate some function f,
with ``y = f(x)``. The interpolant uses monotonic cubic splines
to find the value of new points. (PCHIP stands for Piecewise Cubic
Hermite Interpolating Polynomial).
Parameters
----------
x : ndarray
A 1-D array of monotonically increasing real values. `x` cannot
include duplicate values (otherwise f is overspecified)
y : ndarray
A 1-D array of real values. `y`'s length along the interpolation
axis must be equal to the length of `x`. If N-D array, use `axis`
parameter to select correct axis.
axis : int, optional
Axis in the y array corresponding to the x-coordinate values.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Methods
-------
__call__
derivative
antiderivative
roots
See Also
--------
Akima1DInterpolator
CubicSpline
BPoly
Notes
-----
The interpolator preserves monotonicity in the interpolation data and does
not overshoot if the data is not smooth.
The first derivatives are guaranteed to be continuous, but the second
derivatives may jump at :math:`x_k`.
Determines the derivatives at the points :math:`x_k`, :math:`f'_k`,
by using PCHIP algorithm [1]_.
    Let :math:`h_k = x_{k+1} - x_k`, and :math:`d_k = (y_{k+1} - y_k) / h_k`
    be the slopes at internal points :math:`x_k`.
If the signs of :math:`d_k` and :math:`d_{k-1}` are different or either of
them equals zero, then :math:`f'_k = 0`. Otherwise, it is given by the
weighted harmonic mean
.. math::
\frac{w_1 + w_2}{f'_k} = \frac{w_1}{d_{k-1}} + \frac{w_2}{d_k}
where :math:`w_1 = 2 h_k + h_{k-1}` and :math:`w_2 = h_k + 2 h_{k-1}`.
The end slopes are set using a one-sided scheme [2]_.
References
----------
.. [1] F. N. Fritsch and R. E. Carlson, Monotone Piecewise Cubic Interpolation,
SIAM J. Numer. Anal., 17(2), 238 (1980).
:doi:`10.1137/0717021`.
.. [2] see, e.g., C. Moler, Numerical Computing with Matlab, 2004.
:doi:`10.1137/1.9780898717952`
"""
def __init__(self, x, y, axis=0, extrapolate=None):
x = _asarray_validated(x, check_finite=False, as_inexact=True)
y = _asarray_validated(y, check_finite=False, as_inexact=True)
axis = axis % y.ndim
xp = x.reshape((x.shape[0],) + (1,)*(y.ndim-1))
yp = np.rollaxis(y, axis)
dk = self._find_derivatives(xp, yp)
data = np.hstack((yp[:, None, ...], dk[:, None, ...]))
_b = BPoly.from_derivatives(x, data, orders=None)
super(PchipInterpolator, self).__init__(_b.c, _b.x,
extrapolate=extrapolate)
self.axis = axis
def roots(self):
"""
Return the roots of the interpolated function.
"""
return (PPoly.from_bernstein_basis(self)).roots()
@staticmethod
def _edge_case(h0, h1, m0, m1):
# one-sided three-point estimate for the derivative
d = ((2*h0 + h1)*m0 - h0*m1) / (h0 + h1)
# try to preserve shape
mask = np.sign(d) != np.sign(m0)
mask2 = (np.sign(m0) != np.sign(m1)) & (np.abs(d) > 3.*np.abs(m0))
mmm = (~mask) & mask2
d[mask] = 0.
d[mmm] = 3.*m0[mmm]
return d
@staticmethod
def _find_derivatives(x, y):
        # Determine the derivatives d_k at the points x_k using the PCHIP
        # algorithm:
        #   Let m_k be the slope of the kth segment (between k and k+1).
        #   If m_k == 0, or m_{k-1} == 0, or sgn(m_k) != sgn(m_{k-1}),
        #   then d_k == 0; else use the weighted harmonic mean:
        #     w_1 = 2h_k + h_{k-1},  w_2 = h_k + 2h_{k-1}
        #     1/d_k = 1/(w_1 + w_2)*(w_1 / m_k + w_2 / m_{k-1})
        #   where h_k is the spacing between x_k and x_{k+1}.
y_shape = y.shape
if y.ndim == 1:
# So that _edge_case doesn't end up assigning to scalars
x = x[:, None]
y = y[:, None]
hk = x[1:] - x[:-1]
mk = (y[1:] - y[:-1]) / hk
if y.shape[0] == 2:
# edge case: only have two points, use linear interpolation
dk = np.zeros_like(y)
dk[0] = mk
dk[1] = mk
return dk.reshape(y_shape)
smk = np.sign(mk)
condition = (smk[1:] != smk[:-1]) | (mk[1:] == 0) | (mk[:-1] == 0)
w1 = 2*hk[1:] + hk[:-1]
w2 = hk[1:] + 2*hk[:-1]
# values where division by zero occurs will be excluded
# by 'condition' afterwards
with np.errstate(divide='ignore'):
whmean = (w1/mk[:-1] + w2/mk[1:]) / (w1 + w2)
dk = np.zeros_like(y)
dk[1:-1][condition] = 0.0
dk[1:-1][~condition] = 1.0 / whmean[~condition]
# special case endpoints, as suggested in
# Cleve Moler, Numerical Computing with MATLAB, Chap 3.4
dk[0] = PchipInterpolator._edge_case(hk[0], hk[1], mk[0], mk[1])
dk[-1] = PchipInterpolator._edge_case(hk[-1], hk[-2], mk[-1], mk[-2])
return dk.reshape(y_shape)
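# Illustrative sketch (not part of the public scipy API; the helper name is
# made up): a quick check of the monotonicity-preserving, non-overshooting
# behaviour described in the PchipInterpolator docstring above, on made-up data.
def _example_pchip_monotonicity():
    x = np.array([0., 1., 2., 3., 4.])
    y = np.array([0., 0., 1., 1., 2.])          # non-decreasing data
    interp = PchipInterpolator(x, y)
    xs = np.linspace(0., 4., 101)
    values = interp(xs)
    # The interpolant does not overshoot: it stays non-decreasing and within
    # the range of the data.
    assert np.all(np.diff(values) >= -1e-12)
    assert values.min() >= y.min() - 1e-12 and values.max() <= y.max() + 1e-12
    return values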
def pchip_interpolate(xi, yi, x, der=0, axis=0):
"""
Convenience function for pchip interpolation.
xi and yi are arrays of values used to approximate some function f,
with ``yi = f(xi)``. The interpolant uses monotonic cubic splines
to find the value of new points x and the derivatives there.
See `PchipInterpolator` for details.
Parameters
----------
xi : array_like
A sorted list of x-coordinates, of length N.
yi : array_like
A 1-D array of real values. `yi`'s length along the interpolation
axis must be equal to the length of `xi`. If N-D array, use axis
parameter to select correct axis.
x : scalar or array_like
Of length M.
der : int or list, optional
Derivatives to extract. The 0-th derivative can be included to
return the function value.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
See Also
--------
PchipInterpolator
Returns
-------
y : scalar or array_like
        The result, of length R or length M or M by R.
"""
P = PchipInterpolator(xi, yi, axis=axis)
if der == 0:
return P(x)
elif _isscalar(der):
return P.derivative(der)(x)
else:
return [P.derivative(nu)(x) for nu in der]
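# Illustrative sketch (helper name is made up): `der` may be a single
# derivative order or a list of orders, in which case a list of arrays is
# returned, as implemented in pchip_interpolate above.
def _example_pchip_interpolate_der():
    xi = np.array([0., 1., 2., 3.])
    yi = xi ** 2
    x_new = np.array([0.5, 1.5, 2.5])
    values = pchip_interpolate(xi, yi, x_new)               # function values
    values_and_slopes = pchip_interpolate(xi, yi, x_new, der=[0, 1])
    return values, values_and_slopes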
# Backwards compatibility
pchip = PchipInterpolator
class Akima1DInterpolator(PPoly):
"""
Akima interpolator
Fit piecewise cubic polynomials, given vectors x and y. The interpolation
method by Akima uses a continuously differentiable sub-spline built from
piecewise cubic polynomials. The resultant curve passes through the given
data points and will appear smooth and natural.
Parameters
----------
x : ndarray, shape (m, )
1-D array of monotonically increasing real values.
y : ndarray, shape (m, ...)
N-D array of real values. The length of `y` along the first axis must
be equal to the length of `x`.
axis : int, optional
Specifies the axis of `y` along which to interpolate. Interpolation
defaults to the first axis of `y`.
Methods
-------
__call__
derivative
antiderivative
roots
See Also
--------
PchipInterpolator
CubicSpline
PPoly
Notes
-----
.. versionadded:: 0.14
Use only for precise data, as the fitted curve passes through the given
points exactly. This routine is useful for plotting a pleasingly smooth
    curve through a few given points.
References
----------
[1] A new method of interpolation and smooth curve fitting based
on local procedures. Hiroshi Akima, J. ACM, October 1970, 17(4),
589-602.
"""
def __init__(self, x, y, axis=0):
# Original implementation in MATLAB by N. Shamsundar (BSD licensed), see
# http://www.mathworks.de/matlabcentral/fileexchange/1814-akima-interpolation
x, y = map(np.asarray, (x, y))
axis = axis % y.ndim
if np.any(np.diff(x) < 0.):
raise ValueError("x must be strictly ascending")
if x.ndim != 1:
raise ValueError("x must be 1-dimensional")
if x.size < 2:
raise ValueError("at least 2 breakpoints are needed")
if x.size != y.shape[axis]:
raise ValueError("x.shape must equal y.shape[%s]" % axis)
# move interpolation axis to front
y = np.rollaxis(y, axis)
# determine slopes between breakpoints
m = np.empty((x.size + 3, ) + y.shape[1:])
dx = np.diff(x)
dx = dx[(slice(None), ) + (None, ) * (y.ndim - 1)]
m[2:-2] = np.diff(y, axis=0) / dx
# add two additional points on the left ...
m[1] = 2. * m[2] - m[3]
m[0] = 2. * m[1] - m[2]
# ... and on the right
m[-2] = 2. * m[-3] - m[-4]
m[-1] = 2. * m[-2] - m[-3]
# if m1 == m2 != m3 == m4, the slope at the breakpoint is not defined.
# This is the fill value:
t = .5 * (m[3:] + m[:-3])
# get the denominator of the slope t
dm = np.abs(np.diff(m, axis=0))
f1 = dm[2:]
f2 = dm[:-2]
f12 = f1 + f2
        # This is the mask of where the slope at a breakpoint is defined:
ind = np.nonzero(f12 > 1e-9 * np.max(f12))
x_ind, y_ind = ind[0], ind[1:]
# Set the slope at breakpoint
t[ind] = (f1[ind] * m[(x_ind + 1,) + y_ind] +
f2[ind] * m[(x_ind + 2,) + y_ind]) / f12[ind]
# calculate the higher order coefficients
c = (3. * m[2:-2] - 2. * t[:-1] - t[1:]) / dx
d = (t[:-1] + t[1:] - 2. * m[2:-2]) / dx ** 2
coeff = np.zeros((4, x.size - 1) + y.shape[1:])
coeff[3] = y[:-1]
coeff[2] = t[:-1]
coeff[1] = c
coeff[0] = d
super(Akima1DInterpolator, self).__init__(coeff, x, extrapolate=False)
self.axis = axis
def extend(self, c, x, right=True):
raise NotImplementedError("Extending a 1D Akima interpolator is not "
"yet implemented")
# These are inherited from PPoly, but they do not produce an Akima
# interpolator. Hence stub them out.
@classmethod
def from_spline(cls, tck, extrapolate=None):
raise NotImplementedError("This method does not make sense for "
"an Akima interpolator.")
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
raise NotImplementedError("This method does not make sense for "
"an Akima interpolator.")
class CubicSpline(PPoly):
"""Cubic spline data interpolator.
Interpolate data with a piecewise cubic polynomial which is twice
continuously differentiable [1]_. The result is represented as a `PPoly`
instance with breakpoints matching the given data.
Parameters
----------
x : array_like, shape (n,)
1-d array containing values of the independent variable.
Values must be real, finite and in strictly increasing order.
y : array_like
Array containing values of the dependent variable. It can have
arbitrary number of dimensions, but the length along `axis` (see below)
must match the length of `x`. Values must be finite.
axis : int, optional
Axis along which `y` is assumed to be varying. Meaning that for
``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
Default is 0.
bc_type : string or 2-tuple, optional
Boundary condition type. Two additional equations, given by the
boundary conditions, are required to determine all coefficients of
polynomials on each segment [2]_.
If `bc_type` is a string, then the specified condition will be applied
at both ends of a spline. Available conditions are:
* 'not-a-knot' (default): The first and second segment at a curve end
are the same polynomial. It is a good default when there is no
information on boundary conditions.
    * 'periodic': The interpolated function is assumed to be periodic
of period ``x[-1] - x[0]``. The first and last value of `y` must be
identical: ``y[0] == y[-1]``. This boundary condition will result in
``y'[0] == y'[-1]`` and ``y''[0] == y''[-1]``.
    * 'clamped': The first derivatives at the curve ends are zero. Assuming
a 1D `y`, ``bc_type=((1, 0.0), (1, 0.0))`` is the same condition.
    * 'natural': The second derivatives at the curve ends are zero. Assuming
a 1D `y`, ``bc_type=((2, 0.0), (2, 0.0))`` is the same condition.
If `bc_type` is a 2-tuple, the first and the second value will be
applied at the curve start and end respectively. The tuple values can
be one of the previously mentioned strings (except 'periodic') or a
    tuple `(order, deriv_value)` allowing one to specify arbitrary
derivatives at curve ends:
* `order`: the derivative order, 1 or 2.
* `deriv_value`: array_like containing derivative values, shape must
be the same as `y`, excluding `axis` dimension. For example, if `y`
is 1D, then `deriv_value` must be a scalar. If `y` is 3D with the
shape (n0, n1, n2) and axis=2, then `deriv_value` must be 2D
and have the shape (n0, n1).
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. If None (default), `extrapolate` is
set to 'periodic' for ``bc_type='periodic'`` and to True otherwise.
Attributes
----------
x : ndarray, shape (n,)
Breakpoints. The same `x` which was passed to the constructor.
c : ndarray, shape (4, n-1, ...)
Coefficients of the polynomials on each segment. The trailing
dimensions match the dimensions of `y`, excluding `axis`. For example,
if `y` is 1-d, then ``c[k, i]`` is a coefficient for
``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
axis : int
Interpolation axis. The same `axis` which was passed to the
constructor.
Methods
-------
__call__
derivative
antiderivative
integrate
roots
See Also
--------
Akima1DInterpolator
PchipInterpolator
PPoly
Notes
-----
    Parameters `bc_type` and `extrapolate` work independently, i.e. the former
controls only construction of a spline, and the latter only evaluation.
When a boundary condition is 'not-a-knot' and n = 2, it is replaced by
a condition that the first derivative is equal to the linear interpolant
slope. When both boundary conditions are 'not-a-knot' and n = 3, the
solution is sought as a parabola passing through given points.
    When the 'not-a-knot' boundary condition is applied to both ends, the
resulting spline will be the same as returned by `splrep` (with ``s=0``)
and `InterpolatedUnivariateSpline`, but these two methods use a
representation in B-spline basis.
.. versionadded:: 0.18.0
Examples
--------
In this example the cubic spline is used to interpolate a sampled sinusoid.
You can see that the spline continuity property holds for the first and
    second derivatives but is violated for the third derivative.
>>> from scipy.interpolate import CubicSpline
>>> import matplotlib.pyplot as plt
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> cs = CubicSpline(x, y)
>>> xs = np.arange(-0.5, 9.6, 0.1)
>>> plt.figure(figsize=(6.5, 4))
>>> plt.plot(x, y, 'o', label='data')
>>> plt.plot(xs, np.sin(xs), label='true')
>>> plt.plot(xs, cs(xs), label="S")
>>> plt.plot(xs, cs(xs, 1), label="S'")
>>> plt.plot(xs, cs(xs, 2), label="S''")
>>> plt.plot(xs, cs(xs, 3), label="S'''")
>>> plt.xlim(-0.5, 9.5)
>>> plt.legend(loc='lower left', ncol=2)
>>> plt.show()
In the second example, the unit circle is interpolated with a spline. A
periodic boundary condition is used. You can see that the first derivative
values, ds/dx=0, ds/dy=1 at the periodic point (1, 0) are correctly
computed. Note that a circle cannot be exactly represented by a cubic
spline. To increase precision, more breakpoints would be required.
>>> theta = 2 * np.pi * np.linspace(0, 1, 5)
>>> y = np.c_[np.cos(theta), np.sin(theta)]
>>> cs = CubicSpline(theta, y, bc_type='periodic')
>>> print("ds/dx={:.1f} ds/dy={:.1f}".format(cs(0, 1)[0], cs(0, 1)[1]))
ds/dx=0.0 ds/dy=1.0
>>> xs = 2 * np.pi * np.linspace(0, 1, 100)
>>> plt.figure(figsize=(6.5, 4))
>>> plt.plot(y[:, 0], y[:, 1], 'o', label='data')
>>> plt.plot(np.cos(xs), np.sin(xs), label='true')
>>> plt.plot(cs(xs)[:, 0], cs(xs)[:, 1], label='spline')
>>> plt.axes().set_aspect('equal')
>>> plt.legend(loc='center')
>>> plt.show()
The third example is the interpolation of a polynomial y = x**3 on the
    interval 0 <= x <= 1. A cubic spline can represent this function exactly.
To achieve that we need to specify values and first derivatives at
endpoints of the interval. Note that y' = 3 * x**2 and thus y'(0) = 0 and
y'(1) = 3.
>>> cs = CubicSpline([0, 1], [0, 1], bc_type=((1, 0), (1, 3)))
>>> x = np.linspace(0, 1)
>>> np.allclose(x**3, cs(x))
True
References
----------
.. [1] `Cubic Spline Interpolation
<https://en.wikiversity.org/wiki/Cubic_Spline_Interpolation>`_
on Wikiversity.
.. [2] Carl de Boor, "A Practical Guide to Splines", Springer-Verlag, 1978.
"""
def __init__(self, x, y, axis=0, bc_type='not-a-knot', extrapolate=None):
x, y = map(np.asarray, (x, y))
if np.issubdtype(x.dtype, np.complexfloating):
raise ValueError("`x` must contain real values.")
if np.issubdtype(y.dtype, np.complexfloating):
dtype = complex
else:
dtype = float
y = y.astype(dtype, copy=False)
axis = axis % y.ndim
if x.ndim != 1:
raise ValueError("`x` must be 1-dimensional.")
if x.shape[0] < 2:
raise ValueError("`x` must contain at least 2 elements.")
if x.shape[0] != y.shape[axis]:
raise ValueError("The length of `y` along `axis`={0} doesn't "
"match the length of `x`".format(axis))
if not np.all(np.isfinite(x)):
raise ValueError("`x` must contain only finite values.")
if not np.all(np.isfinite(y)):
raise ValueError("`y` must contain only finite values.")
dx = np.diff(x)
if np.any(dx <= 0):
raise ValueError("`x` must be strictly increasing sequence.")
n = x.shape[0]
y = np.rollaxis(y, axis)
bc, y = self._validate_bc(bc_type, y, y.shape[1:], axis)
if extrapolate is None:
if bc[0] == 'periodic':
extrapolate = 'periodic'
else:
extrapolate = True
dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
slope = np.diff(y, axis=0) / dxr
# If bc is 'not-a-knot' this change is just a convention.
# If bc is 'periodic' then we already checked that y[0] == y[-1],
# and the spline is just a constant, we handle this case in the same
# way by setting the first derivatives to slope, which is 0.
if n == 2:
if bc[0] in ['not-a-knot', 'periodic']:
bc[0] = (1, slope[0])
if bc[1] in ['not-a-knot', 'periodic']:
bc[1] = (1, slope[0])
# This is a very special case, when both conditions are 'not-a-knot'
# and n == 3. In this case 'not-a-knot' can't be handled regularly
# as the both conditions are identical. We handle this case by
# constructing a parabola passing through given points.
if n == 3 and bc[0] == 'not-a-knot' and bc[1] == 'not-a-knot':
A = np.zeros((3, 3)) # This is a standard matrix.
b = np.empty((3,) + y.shape[1:], dtype=y.dtype)
A[0, 0] = 1
A[0, 1] = 1
A[1, 0] = dx[1]
A[1, 1] = 2 * (dx[0] + dx[1])
A[1, 2] = dx[0]
A[2, 1] = 1
A[2, 2] = 1
b[0] = 2 * slope[0]
b[1] = 3 * (dxr[0] * slope[1] + dxr[1] * slope[0])
b[2] = 2 * slope[1]
s = solve(A, b, overwrite_a=True, overwrite_b=True,
check_finite=False)
else:
# Find derivative values at each x[i] by solving a tridiagonal
# system.
A = np.zeros((3, n)) # This is a banded matrix representation.
b = np.empty((n,) + y.shape[1:], dtype=y.dtype)
# Filling the system for i=1..n-2
            # (x[i+1] - x[i]) * s[i-1] +\
# 2 * ((x[i] - x[i-1]) + (x[i+1] - x[i])) * s[i] +\
# (x[i] - x[i-1]) * s[i+1] =\
# 3 * ((x[i+1] - x[i])*(y[i] - y[i-1])/(x[i] - x[i-1]) +\
# (x[i] - x[i-1])*(y[i+1] - y[i])/(x[i+1] - x[i]))
A[1, 1:-1] = 2 * (dx[:-1] + dx[1:]) # The diagonal
A[0, 2:] = dx[:-1] # The upper diagonal
A[-1, :-2] = dx[1:] # The lower diagonal
b[1:-1] = 3 * (dxr[1:] * slope[:-1] + dxr[:-1] * slope[1:])
bc_start, bc_end = bc
if bc_start == 'periodic':
# Due to the periodicity, and because y[-1] = y[0], the linear
# system has (n-1) unknowns/equations instead of n:
A = A[:, 0:-1]
A[1, 0] = 2 * (dx[-1] + dx[0])
A[0, 1] = dx[-1]
b = b[:-1]
# Also, due to the periodicity, the system is not tri-diagonal.
# We need to compute a "condensed" matrix of shape (n-2, n-2).
# See http://www.cfm.brown.edu/people/gk/chap6/node14.html for
# more explanations.
# The condensed matrix is obtained by removing the last column
# and last row of the (n-1, n-1) system matrix. The removed
# values are saved in scalar variables with the (n-1, n-1)
# system matrix indices forming their names:
a_m1_0 = dx[-2] # lower left corner value: A[-1, 0]
a_m1_m2 = dx[-1]
a_m1_m1 = 2 * (dx[-1] + dx[-2])
a_m2_m1 = dx[-2]
a_0_m1 = dx[0]
b[0] = 3 * (dxr[0] * slope[-1] + dxr[-1] * slope[0])
b[-1] = 3 * (dxr[-1] * slope[-2] + dxr[-2] * slope[-1])
Ac = A[:, :-1]
b1 = b[:-1]
b2 = np.zeros_like(b1)
b2[0] = -a_0_m1
b2[-1] = -a_m2_m1
# s1 and s2 are the solutions of (n-2, n-2) system
s1 = solve_banded((1, 1), Ac, b1, overwrite_ab=False,
overwrite_b=False, check_finite=False)
s2 = solve_banded((1, 1), Ac, b2, overwrite_ab=False,
overwrite_b=False, check_finite=False)
# computing the s[n-2] solution:
s_m1 = ((b[-1] - a_m1_0 * s1[0] - a_m1_m2 * s1[-1]) /
(a_m1_m1 + a_m1_0 * s2[0] + a_m1_m2 * s2[-1]))
# s is the solution of the (n, n) system:
s = np.empty((n,) + y.shape[1:], dtype=y.dtype)
s[:-2] = s1 + s_m1 * s2
s[-2] = s_m1
s[-1] = s[0]
else:
if bc_start == 'not-a-knot':
A[1, 0] = dx[1]
A[0, 1] = x[2] - x[0]
d = x[2] - x[0]
b[0] = ((dxr[0] + 2*d) * dxr[1] * slope[0] +
dxr[0]**2 * slope[1]) / d
elif bc_start[0] == 1:
A[1, 0] = 1
A[0, 1] = 0
b[0] = bc_start[1]
elif bc_start[0] == 2:
A[1, 0] = 2 * dx[0]
A[0, 1] = dx[0]
b[0] = -0.5 * bc_start[1] * dx[0]**2 + 3 * (y[1] - y[0])
if bc_end == 'not-a-knot':
A[1, -1] = dx[-2]
A[-1, -2] = x[-1] - x[-3]
d = x[-1] - x[-3]
b[-1] = ((dxr[-1]**2*slope[-2] +
(2*d + dxr[-1])*dxr[-2]*slope[-1]) / d)
elif bc_end[0] == 1:
A[1, -1] = 1
A[-1, -2] = 0
b[-1] = bc_end[1]
elif bc_end[0] == 2:
A[1, -1] = 2 * dx[-1]
A[-1, -2] = dx[-1]
b[-1] = 0.5 * bc_end[1] * dx[-1]**2 + 3 * (y[-1] - y[-2])
s = solve_banded((1, 1), A, b, overwrite_ab=True,
overwrite_b=True, check_finite=False)
# Compute coefficients in PPoly form.
t = (s[:-1] + s[1:] - 2 * slope) / dxr
c = np.empty((4, n - 1) + y.shape[1:], dtype=t.dtype)
c[0] = t / dxr
c[1] = (slope - s[:-1]) / dxr - t
c[2] = s[:-1]
c[3] = y[:-1]
super(CubicSpline, self).__init__(c, x, extrapolate=extrapolate)
self.axis = axis
@staticmethod
def _validate_bc(bc_type, y, expected_deriv_shape, axis):
"""Validate and prepare boundary conditions.
Returns
-------
validated_bc : 2-tuple
Boundary conditions for a curve start and end.
y : ndarray
            y cast to complex dtype if one of the boundary conditions has
complex dtype.
"""
if isinstance(bc_type, string_types):
if bc_type == 'periodic':
if not np.allclose(y[0], y[-1], rtol=1e-15, atol=1e-15):
raise ValueError(
"The first and last `y` point along axis {} must "
"be identical (within machine precision) when "
"bc_type='periodic'.".format(axis))
bc_type = (bc_type, bc_type)
else:
if len(bc_type) != 2:
raise ValueError("`bc_type` must contain 2 elements to "
"specify start and end conditions.")
if 'periodic' in bc_type:
raise ValueError("'periodic' `bc_type` is defined for both "
"curve ends and cannot be used with other "
"boundary conditions.")
validated_bc = []
for bc in bc_type:
if isinstance(bc, string_types):
if bc == 'clamped':
validated_bc.append((1, np.zeros(expected_deriv_shape)))
elif bc == 'natural':
validated_bc.append((2, np.zeros(expected_deriv_shape)))
elif bc in ['not-a-knot', 'periodic']:
validated_bc.append(bc)
else:
raise ValueError("bc_type={} is not allowed.".format(bc))
else:
try:
deriv_order, deriv_value = bc
except Exception:
raise ValueError("A specified derivative value must be "
"given in the form (order, value).")
if deriv_order not in [1, 2]:
raise ValueError("The specified derivative order must "
"be 1 or 2.")
deriv_value = np.asarray(deriv_value)
if deriv_value.shape != expected_deriv_shape:
raise ValueError(
"`deriv_value` shape {} is not the expected one {}."
.format(deriv_value.shape, expected_deriv_shape))
if np.issubdtype(deriv_value.dtype, np.complexfloating):
y = y.astype(complex, copy=False)
validated_bc.append((deriv_order, deriv_value))
return validated_bc, y
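# Illustrative sketch (helper name is made up): the string conditions
# 'clamped' and 'natural' are converted by _validate_bc above into explicit
# (order, value) tuples, so the spellings below are interchangeable.
def _example_cubic_spline_bc_equivalence():
    x = np.linspace(0., 2. * np.pi, 10)
    y = np.sin(x)
    xs = np.linspace(0., 2. * np.pi, 50)
    clamped = CubicSpline(x, y, bc_type='clamped')
    clamped_explicit = CubicSpline(x, y, bc_type=((1, 0.0), (1, 0.0)))
    natural = CubicSpline(x, y, bc_type='natural')
    natural_explicit = CubicSpline(x, y, bc_type=((2, 0.0), (2, 0.0)))
    assert np.allclose(clamped(xs), clamped_explicit(xs))
    assert np.allclose(natural(xs), natural_explicit(xs))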
| bsd-3-clause |
mrkm4ntr/incubator-airflow | airflow/providers/presto/hooks/presto.py | 2 | 7336 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from typing import Any, Iterable, Optional
import prestodb
from prestodb.exceptions import DatabaseError
from prestodb.transaction import IsolationLevel
from airflow import AirflowException
from airflow.configuration import conf
from airflow.hooks.dbapi_hook import DbApiHook
from airflow.models import Connection
class PrestoException(Exception):
"""Presto exception"""
def _boolify(value):
if isinstance(value, bool):
return value
if isinstance(value, str):
if value.lower() == 'false':
return False
elif value.lower() == 'true':
return True
return value
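# Illustrative sketch (function name is made up): _boolify coerces the
# string-typed values that commonly appear in a connection's JSON extras into
# booleans, and passes everything else through unchanged.
def _example_boolify_behaviour():
    assert _boolify('true') is True
    assert _boolify('False') is False
    assert _boolify(True) is True
    assert _boolify('anything else') == 'anything else'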
class PrestoHook(DbApiHook):
"""
Interact with Presto through prestodb.
>>> ph = PrestoHook()
>>> sql = "SELECT count(1) AS num FROM airflow.static_babynames"
>>> ph.get_records(sql)
[[340698]]
"""
conn_name_attr = 'presto_conn_id'
default_conn_name = 'presto_default'
def get_conn(self) -> Connection:
"""Returns a connection object"""
db = self.get_connection(
self.presto_conn_id # type: ignore[attr-defined] # pylint: disable=no-member
)
extra = db.extra_dejson
auth = None
if db.password and extra.get('auth') == 'kerberos':
raise AirflowException("Kerberos authorization doesn't support password.")
elif db.password:
auth = prestodb.auth.BasicAuthentication(db.login, db.password)
elif extra.get('auth') == 'kerberos':
auth = prestodb.auth.KerberosAuthentication(
config=extra.get('kerberos__config', os.environ.get('KRB5_CONFIG')),
service_name=extra.get('kerberos__service_name'),
mutual_authentication=_boolify(extra.get('kerberos__mutual_authentication', False)),
force_preemptive=_boolify(extra.get('kerberos__force_preemptive', False)),
hostname_override=extra.get('kerberos__hostname_override'),
sanitize_mutual_error_response=_boolify(
extra.get('kerberos__sanitize_mutual_error_response', True)
),
principal=extra.get('kerberos__principal', conf.get('kerberos', 'principal')),
delegate=_boolify(extra.get('kerberos__delegate', False)),
ca_bundle=extra.get('kerberos__ca_bundle'),
)
presto_conn = prestodb.dbapi.connect(
host=db.host,
port=db.port,
user=db.login,
source=db.extra_dejson.get('source', 'airflow'),
http_scheme=db.extra_dejson.get('protocol', 'http'),
catalog=db.extra_dejson.get('catalog', 'hive'),
schema=db.schema,
auth=auth,
isolation_level=self.get_isolation_level(), # type: ignore[func-returns-value]
)
if extra.get('verify') is not None:
            # Unfortunately the verify parameter is not yet available via the public API.
# The PR is merged in the presto library, but has not been released.
# See: https://github.com/prestosql/presto-python-client/pull/31
presto_conn._http_session.verify = _boolify(extra['verify']) # pylint: disable=protected-access
return presto_conn
def get_isolation_level(self) -> Any:
"""Returns an isolation level"""
db = self.get_connection(
self.presto_conn_id # type: ignore[attr-defined] # pylint: disable=no-member
)
isolation_level = db.extra_dejson.get('isolation_level', 'AUTOCOMMIT').upper()
return getattr(IsolationLevel, isolation_level, IsolationLevel.AUTOCOMMIT)
@staticmethod
def _strip_sql(sql: str) -> str:
return sql.strip().rstrip(';')
def get_records(self, hql, parameters: Optional[dict] = None):
"""Get a set of records from Presto"""
try:
return super().get_records(self._strip_sql(hql), parameters)
except DatabaseError as e:
raise PrestoException(e)
def get_first(self, hql: str, parameters: Optional[dict] = None) -> Any:
"""Returns only the first row, regardless of how many rows the query returns."""
try:
return super().get_first(self._strip_sql(hql), parameters)
except DatabaseError as e:
raise PrestoException(e)
def get_pandas_df(self, hql, parameters=None, **kwargs):
"""Get a pandas dataframe from a sql query."""
import pandas
cursor = self.get_cursor()
try:
cursor.execute(self._strip_sql(hql), parameters)
data = cursor.fetchall()
except DatabaseError as e:
raise PrestoException(e)
column_descriptions = cursor.description
if data:
df = pandas.DataFrame(data, **kwargs)
df.columns = [c[0] for c in column_descriptions]
else:
df = pandas.DataFrame(**kwargs)
return df
def run(
self,
hql,
autocommit: bool = False,
parameters: Optional[dict] = None,
) -> None:
"""Execute the statement against Presto. Can be used to create views."""
return super().run(sql=self._strip_sql(hql), parameters=parameters)
def insert_rows(
self,
table: str,
rows: Iterable[tuple],
target_fields: Optional[Iterable[str]] = None,
commit_every: int = 0,
replace: bool = False,
**kwargs,
) -> None:
"""
A generic way to insert a set of tuples into a table.
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:type commit_every: int
:param replace: Whether to replace instead of insert
:type replace: bool
"""
if self.get_isolation_level() == IsolationLevel.AUTOCOMMIT:
self.log.info(
                'Transactions are not enabled in presto connection. '
'Please use the isolation_level property to enable it. '
'Falling back to insert all rows in one transaction.'
)
commit_every = 0
super().insert_rows(table, rows, target_fields, commit_every)
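# Illustrative usage sketch (not part of the provider module): assumes an
# Airflow connection named 'presto_default' exists; the query and table name
# are made up for the example.
def _example_presto_hook_usage():
    hook = PrestoHook()
    # Trailing semicolons are stripped by _strip_sql before execution.
    df = hook.get_pandas_df("SELECT 1 AS one;")
    hook.insert_rows(table='example_table', rows=[(1, 'a'), (2, 'b')])
    return df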
| apache-2.0 |
RayMick/scikit-learn | sklearn/tests/test_common.py | 70 | 7717 | """
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <[email protected]>
# Gael Varoquaux [email protected]
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import pkgutil
from sklearn.externals.six import PY3
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import ignore_warnings
import sklearn
from sklearn.cluster.bicluster import BiclusterMixin
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
_yield_all_checks,
CROSS_DECOMPOSITION,
check_parameters_default_constructible,
check_class_weight_balanced_linear_classifier,
check_transformer_n_iter,
check_non_transformer_estimators_n_iter,
check_get_params_invariance,
check_fit2d_predict1d,
check_fit1d_1sample)
def test_all_estimator_no_base_class():
# test that all_estimators doesn't find abstract classes.
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
# Test that estimators are default-constructible, clonable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
yield check_parameters_default_constructible, name, Estimator
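# Illustrative sketch (not an actual test, so it is not collected): what the
# default-constructible / clonable / repr checks above boil down to for one
# concrete estimator; the estimator choice is arbitrary.
def _example_default_constructible_check():
    from sklearn.base import clone
    from sklearn.tree import DecisionTreeClassifier
    estimator = DecisionTreeClassifier()    # default-constructible
    cloned = clone(estimator)               # clonable
    repr(cloned)                            # working repr
    return cloned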
def test_non_meta_estimators():
# input validation etc for non-meta estimators
estimators = all_estimators()
for name, Estimator in estimators:
if issubclass(Estimator, BiclusterMixin):
continue
if name.startswith("_"):
continue
for check in _yield_all_checks(name, Estimator):
yield check, name, Estimator
def test_configure():
    # Smoke test the 'configure' step of setup; this tests all the
    # 'configure' functions in the setup.py files in the scikit
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
clean_warning_registry()
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_balanced_linear_classifiers():
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
linear_classifiers = [
(name, clazz)
for name, clazz in classifiers
if 'class_weight' in clazz().get_params().keys()
and issubclass(clazz, LinearClassifierMixin)]
for name, Classifier in linear_classifiers:
if name == "LogisticRegressionCV":
            # Contrary to RidgeClassifierCV, LogisticRegressionCV uses actual
            # CV folds and fits a model for each CV iteration before averaging
# the coef. Therefore it is expected to not behave exactly as the
# other linear model.
continue
yield check_class_weight_balanced_linear_classifier, name, Classifier
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_non_transformer_estimators_n_iter():
    # Test that all non-transformer estimators that have a max_iter
    # attribute set the n_iter attribute to at least 1.
for est_type in ['regressor', 'classifier', 'cluster']:
regressors = all_estimators(type_filter=est_type)
for name, Estimator in regressors:
# LassoLars stops early for the default alpha=1.0 for
# the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, "max_iter"):
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV']):
continue
# Tested in test_transformer_n_iter below
elif (name in CROSS_DECOMPOSITION or
name in ['LinearSVC', 'LogisticRegression']):
continue
else:
# Multitask models related to ENet cannot handle
                    # mono-output y.
yield (check_non_transformer_estimators_n_iter,
name, estimator, 'Multi' in name)
def test_transformer_n_iter():
transformers = all_estimators(type_filter='transformer')
for name, Estimator in transformers:
estimator = Estimator()
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if hasattr(estimator, "max_iter") and name not in external_solver:
yield check_transformer_n_iter, name, estimator
def test_get_params_invariance():
# Test for estimators that support get_params, that
# get_params(deep=False) is a subset of get_params(deep=True)
# Related to issue #4465
estimators = all_estimators(include_meta_estimators=False, include_other=True)
for name, Estimator in estimators:
if hasattr(Estimator, 'get_params'):
yield check_get_params_invariance, name, Estimator
| bsd-3-clause |
GuessWhoSamFoo/pandas | pandas/io/pytables.py | 1 | 168497 | # pylint: disable-msg=E1101,W0613,W0603
"""
High level interface to PyTables for reading and writing pandas data structures
to disk
"""
import copy
from datetime import date, datetime
from distutils.version import LooseVersion
import itertools
import os
import re
import time
import warnings
import numpy as np
from pandas._libs import algos, lib, writers as libwriters
from pandas._libs.tslibs import timezones
from pandas.compat import PY3, filter, lrange, range, string_types
from pandas.errors import PerformanceWarning
from pandas.core.dtypes.common import (
ensure_int64, ensure_object, ensure_platform_int, is_categorical_dtype,
is_datetime64_dtype, is_datetime64tz_dtype, is_list_like,
is_timedelta64_dtype)
from pandas.core.dtypes.missing import array_equivalent
from pandas import (
DataFrame, DatetimeIndex, Index, Int64Index, MultiIndex, Panel,
PeriodIndex, Series, SparseDataFrame, SparseSeries, TimedeltaIndex, compat,
concat, isna, to_datetime)
from pandas.core import config
from pandas.core.algorithms import match, unique
from pandas.core.arrays.categorical import (
Categorical, _factorize_from_iterables)
from pandas.core.arrays.sparse import BlockIndex, IntIndex
from pandas.core.base import StringMixin
import pandas.core.common as com
from pandas.core.computation.pytables import Expr, maybe_expression
from pandas.core.config import get_option
from pandas.core.index import ensure_index
from pandas.core.internals import (
BlockManager, _block2d_to_blocknd, _block_shape, _factor_indexer,
make_block)
from pandas.io.common import _stringify_path
from pandas.io.formats.printing import adjoin, pprint_thing
# versioning attribute
_version = '0.15.2'
# encoding
# PY3 encoding if we don't specify
_default_encoding = 'UTF-8'
def _ensure_decoded(s):
""" if we have bytes, decode them to unicode """
if isinstance(s, np.bytes_):
s = s.decode('UTF-8')
return s
def _ensure_encoding(encoding):
# set the encoding if we need
if encoding is None:
if PY3:
encoding = _default_encoding
return encoding
def _ensure_str(name):
"""Ensure that an index / column name is a str (python 3) or
unicode (python 2); otherwise they may be np.string dtype.
Non-string dtypes are passed through unchanged.
https://github.com/pandas-dev/pandas/issues/13492
"""
if isinstance(name, compat.string_types):
name = compat.text_type(name)
return name
Term = Expr
def _ensure_term(where, scope_level):
"""
ensure that the where is a Term or a list of Term
this makes sure that we are capturing the scope of variables
that are passed
create the terms here with a frame_level=2 (we are 2 levels down)
"""
# only consider list/tuple here as an ndarray is automatically a coordinate
# list
level = scope_level + 1
if isinstance(where, (list, tuple)):
wlist = []
for w in filter(lambda x: x is not None, where):
if not maybe_expression(w):
wlist.append(w)
else:
wlist.append(Term(w, scope_level=level))
where = wlist
elif maybe_expression(where):
where = Term(where, scope_level=level)
return where
class PossibleDataLossError(Exception):
pass
class ClosedFileError(Exception):
pass
class IncompatibilityWarning(Warning):
pass
incompatibility_doc = """
where criteria is being ignored as this version [%s] is too old (or
not-defined), read the file in and write it out to a new file to upgrade (with
the copy_to method)
"""
class AttributeConflictWarning(Warning):
pass
attribute_conflict_doc = """
the [%s] attribute of the existing index is [%s] which conflicts with the new
[%s], resetting the attribute to None
"""
class DuplicateWarning(Warning):
pass
duplicate_doc = """
duplicate entries in table, taking most recently appended
"""
performance_doc = """
your performance may suffer as PyTables will pickle object types that it cannot
map directly to c-types [inferred_type->%s,key->%s] [items->%s]
"""
# formats
_FORMAT_MAP = {
u'f': 'fixed',
u'fixed': 'fixed',
u't': 'table',
u'table': 'table',
}
format_deprecate_doc = """
the table keyword has been deprecated
use the format='fixed(f)|table(t)' keyword instead
fixed(f) : specifies the Fixed format
and is the default for put operations
table(t) : specifies the Table format
and is the default for append operations
"""
# map object types
_TYPE_MAP = {
Series: u'series',
SparseSeries: u'sparse_series',
DataFrame: u'frame',
SparseDataFrame: u'sparse_frame',
Panel: u'wide',
}
# storer class map
_STORER_MAP = {
u'Series': 'LegacySeriesFixed',
u'DataFrame': 'LegacyFrameFixed',
u'DataMatrix': 'LegacyFrameFixed',
u'series': 'SeriesFixed',
u'sparse_series': 'SparseSeriesFixed',
u'frame': 'FrameFixed',
u'sparse_frame': 'SparseFrameFixed',
u'wide': 'PanelFixed',
}
# table class map
_TABLE_MAP = {
u'generic_table': 'GenericTable',
u'appendable_series': 'AppendableSeriesTable',
u'appendable_multiseries': 'AppendableMultiSeriesTable',
u'appendable_frame': 'AppendableFrameTable',
u'appendable_multiframe': 'AppendableMultiFrameTable',
u'appendable_panel': 'AppendablePanelTable',
u'worm': 'WORMTable',
u'legacy_frame': 'LegacyFrameTable',
u'legacy_panel': 'LegacyPanelTable',
}
# axes map
_AXES_MAP = {
DataFrame: [0],
Panel: [1, 2]
}
# register our configuration options
dropna_doc = """
: boolean
drop ALL nan rows when appending to a table
"""
format_doc = """
: format
default format writing format, if None, then
put will default to 'fixed' and append will default to 'table'
"""
with config.config_prefix('io.hdf'):
config.register_option('dropna_table', False, dropna_doc,
validator=config.is_bool)
config.register_option(
'default_format', None, format_doc,
validator=config.is_one_of_factory(['fixed', 'table', None])
)
# oh the troubles to reduce import time
_table_mod = None
_table_file_open_policy_is_strict = False
def _tables():
global _table_mod
global _table_file_open_policy_is_strict
if _table_mod is None:
import tables
_table_mod = tables
# version requirements
if LooseVersion(tables.__version__) < LooseVersion('3.0.0'):
raise ImportError("PyTables version >= 3.0.0 is required")
# set the file open policy
# return the file open policy; this changes as of pytables 3.1
# depending on the HDF5 version
try:
_table_file_open_policy_is_strict = (
tables.file._FILE_OPEN_POLICY == 'strict')
except AttributeError:
pass
return _table_mod
# interface to/from ###
def to_hdf(path_or_buf, key, value, mode=None, complevel=None, complib=None,
append=None, **kwargs):
""" store this object, close it if we opened it """
if append:
f = lambda store: store.append(key, value, **kwargs)
else:
f = lambda store: store.put(key, value, **kwargs)
path_or_buf = _stringify_path(path_or_buf)
if isinstance(path_or_buf, string_types):
with HDFStore(path_or_buf, mode=mode, complevel=complevel,
complib=complib) as store:
f(store)
else:
f(path_or_buf)
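# Illustrative sketch (helper name and file name are made up; requires
# PyTables): a minimal round trip through the to_hdf/read_hdf pair defined in
# this module.
def _example_hdf_round_trip():
    df = DataFrame({'a': [1, 2, 3], 'b': [4., 5., 6.]})
    to_hdf('example_store.h5', 'df', df, mode='w', format='table')
    return read_hdf('example_store.h5', 'df')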
def read_hdf(path_or_buf, key=None, mode='r', **kwargs):
"""
Read from the store, close it if we opened it.
Retrieve pandas object stored in file, optionally based on where
criteria
Parameters
----------
path_or_buf : string, buffer or path object
Path to the file to open, or an open :class:`pandas.HDFStore` object.
Supports any object implementing the ``__fspath__`` protocol.
This includes :class:`pathlib.Path` and py._path.local.LocalPath
objects.
.. versionadded:: 0.19.0 support for pathlib, py.path.
.. versionadded:: 0.21.0 support for __fspath__ protocol.
key : object, optional
The group identifier in the store. Can be omitted if the HDF file
contains a single pandas object.
mode : {'r', 'r+', 'a'}, optional
Mode to use when opening the file. Ignored if path_or_buf is a
:class:`pandas.HDFStore`. Default is 'r'.
where : list, optional
A list of Term (or convertible) objects.
start : int, optional
Row number to start selection.
stop : int, optional
Row number to stop selection.
columns : list, optional
A list of columns names to return.
iterator : bool, optional
Return an iterator object.
chunksize : int, optional
Number of rows to include in an iteration when using an iterator.
errors : str, default 'strict'
Specifies how encoding and decoding errors are to be handled.
See the errors argument for :func:`open` for a full list
of options.
**kwargs
Additional keyword arguments passed to HDFStore.
Returns
-------
item : object
The selected object. Return type depends on the object stored.
See Also
--------
pandas.DataFrame.to_hdf : Write a HDF file from a DataFrame.
pandas.HDFStore : Low-level access to HDF files.
Examples
--------
>>> df = pd.DataFrame([[1, 1.0, 'a']], columns=['x', 'y', 'z'])
>>> df.to_hdf('./store.h5', 'data')
>>> reread = pd.read_hdf('./store.h5')
"""
if mode not in ['r', 'r+', 'a']:
raise ValueError('mode {0} is not allowed while performing a read. '
'Allowed modes are r, r+ and a.'.format(mode))
# grab the scope
if 'where' in kwargs:
kwargs['where'] = _ensure_term(kwargs['where'], scope_level=1)
if isinstance(path_or_buf, HDFStore):
if not path_or_buf.is_open:
raise IOError('The HDFStore must be open for reading.')
store = path_or_buf
auto_close = False
else:
path_or_buf = _stringify_path(path_or_buf)
if not isinstance(path_or_buf, string_types):
raise NotImplementedError('Support for generic buffers has not '
'been implemented.')
try:
exists = os.path.exists(path_or_buf)
# if filepath is too long
except (TypeError, ValueError):
exists = False
if not exists:
raise compat.FileNotFoundError(
'File {path} does not exist'.format(path=path_or_buf))
store = HDFStore(path_or_buf, mode=mode, **kwargs)
# can't auto open/close if we are using an iterator
# so delegate to the iterator
auto_close = True
try:
if key is None:
groups = store.groups()
if len(groups) == 0:
raise ValueError('No dataset in HDF5 file.')
candidate_only_group = groups[0]
# For the HDF file to have only one dataset, all other groups
# should then be metadata groups for that candidate group. (This
# assumes that the groups() method enumerates parent groups
# before their children.)
for group_to_check in groups[1:]:
if not _is_metadata_of(group_to_check, candidate_only_group):
raise ValueError('key must be provided when HDF5 file '
'contains multiple datasets.')
key = candidate_only_group._v_pathname
return store.select(key, auto_close=auto_close, **kwargs)
except (ValueError, TypeError):
# if there is an error, close the store
try:
store.close()
except AttributeError:
pass
raise
def _is_metadata_of(group, parent_group):
"""Check if a given group is a metadata group for a given parent_group."""
if group._v_depth <= parent_group._v_depth:
return False
current = group
while current._v_depth > 1:
parent = current._v_parent
if parent == parent_group and current._v_name == 'meta':
return True
current = current._v_parent
return False
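# Illustrative sketch (helper name and file name are made up; requires
# PyTables): when the file contains a single pandas object, read_hdf walks the
# groups as above and the key may be omitted.
def _example_read_hdf_without_key():
    df = DataFrame({'x': [1., 2., 3.]})
    to_hdf('single_object.h5', 'only_object', df, mode='w')
    return read_hdf('single_object.h5')      # no key needed: one dataset only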
class HDFStore(StringMixin):
"""
Dict-like IO interface for storing pandas objects in PyTables
either Fixed or Table format.
Parameters
----------
path : string
File path to HDF5 file
mode : {'a', 'w', 'r', 'r+'}, default 'a'
``'r'``
Read-only; no data can be modified.
``'w'``
Write; a new file is created (an existing file with the same
name would be deleted).
``'a'``
Append; an existing file is opened for reading and writing,
and if the file does not exist it is created.
``'r+'``
It is similar to ``'a'``, but the file must already exist.
complevel : int, 0-9, default None
Specifies a compression level for data.
A value of 0 disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
Specifies the compression library to be used.
As of v0.20.2 these additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
Specifying a compression library which is not available issues
a ValueError.
fletcher32 : bool, default False
If applying compression use the fletcher32 checksum
Examples
--------
>>> bar = pd.DataFrame(np.random.randn(10, 4))
>>> store = pd.HDFStore('test.h5')
>>> store['foo'] = bar # write to HDF5
>>> bar = store['foo'] # retrieve
>>> store.close()
"""
def __init__(self, path, mode=None, complevel=None, complib=None,
fletcher32=False, **kwargs):
if 'format' in kwargs:
raise ValueError('format is not a defined argument for HDFStore')
try:
import tables # noqa
except ImportError as ex: # pragma: no cover
raise ImportError('HDFStore requires PyTables, "{ex!s}" problem '
'importing'.format(ex=ex))
if complib is not None and complib not in tables.filters.all_complibs:
raise ValueError(
"complib only supports {libs} compression.".format(
libs=tables.filters.all_complibs))
if complib is None and complevel is not None:
complib = tables.filters.default_complib
self._path = _stringify_path(path)
if mode is None:
mode = 'a'
self._mode = mode
self._handle = None
self._complevel = complevel if complevel else 0
self._complib = complib
self._fletcher32 = fletcher32
self._filters = None
self.open(mode=mode, **kwargs)
def __fspath__(self):
return self._path
@property
def root(self):
""" return the root node """
self._check_if_open()
return self._handle.root
@property
def filename(self):
return self._path
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
self.put(key, value)
def __delitem__(self, key):
return self.remove(key)
def __getattr__(self, name):
""" allow attribute access to get stores """
try:
return self.get(name)
except (KeyError, ClosedFileError):
pass
raise AttributeError(
"'{object}' object has no attribute '{name}'".format(
object=type(self).__name__, name=name))
def __contains__(self, key):
""" check for existence of this key
        can match the exact pathname or the pathname w/o the leading '/'
"""
node = self.get_node(key)
if node is not None:
name = node._v_pathname
if name == key or name[1:] == key:
return True
return False
def __len__(self):
return len(self.groups())
def __unicode__(self):
return '{type}\nFile path: {path}\n'.format(
type=type(self), path=pprint_thing(self._path))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def keys(self):
"""
Return a (potentially unordered) list of the keys corresponding to the
objects stored in the HDFStore. These are ABSOLUTE path-names (e.g.
        have the leading '/').
"""
return [n._v_pathname for n in self.groups()]
def __iter__(self):
return iter(self.keys())
def items(self):
"""
iterate on key->group
"""
for g in self.groups():
yield g._v_pathname, g
iteritems = items
def open(self, mode='a', **kwargs):
"""
Open the file in the specified mode
Parameters
----------
mode : {'a', 'w', 'r', 'r+'}, default 'a'
See HDFStore docstring or tables.open_file for info about modes
"""
tables = _tables()
if self._mode != mode:
# if we are changing a write mode to read, ok
if self._mode in ['a', 'w'] and mode in ['r', 'r+']:
pass
elif mode in ['w']:
# this would truncate, raise here
if self.is_open:
raise PossibleDataLossError(
"Re-opening the file [{0}] with mode [{1}] "
"will delete the current file!"
.format(self._path, self._mode)
)
self._mode = mode
# close and reopen the handle
if self.is_open:
self.close()
if self._complevel and self._complevel > 0:
self._filters = _tables().Filters(self._complevel, self._complib,
fletcher32=self._fletcher32)
try:
self._handle = tables.open_file(self._path, self._mode, **kwargs)
except (IOError) as e: # pragma: no cover
if 'can not be written' in str(e):
print(
'Opening {path} in read-only mode'.format(path=self._path))
self._handle = tables.open_file(self._path, 'r', **kwargs)
else:
raise
except (ValueError) as e:
# trap PyTables >= 3.1 FILE_OPEN_POLICY exception
# to provide an updated message
if 'FILE_OPEN_POLICY' in str(e):
e = ValueError(
"PyTables [{version}] no longer supports opening multiple "
"files\n"
"even in read-only mode on this HDF5 version "
"[{hdf_version}]. You can accept this\n"
"and not open the same file multiple times at once,\n"
"upgrade the HDF5 version, or downgrade to PyTables 3.0.0 "
"which allows\n"
"files to be opened multiple times at once\n"
.format(version=tables.__version__,
hdf_version=tables.get_hdf5_version()))
raise e
except (Exception) as e:
# trying to read from a non-existent file causes an error which
# is not part of IOError, make it one
if self._mode == 'r' and 'Unable to open/create file' in str(e):
raise IOError(str(e))
raise
def close(self):
"""
Close the PyTables file handle
"""
if self._handle is not None:
self._handle.close()
self._handle = None
@property
def is_open(self):
"""
return a boolean indicating whether the file is open
"""
if self._handle is None:
return False
return bool(self._handle.isopen)
def flush(self, fsync=False):
"""
Force all buffered modifications to be written to disk.
Parameters
----------
fsync : bool (default False)
call ``os.fsync()`` on the file handle to force writing to disk.
Notes
-----
Without ``fsync=True``, flushing may not guarantee that the OS writes
to disk. With fsync, the operation will block until the OS claims the
file has been written; however, other caching layers may still
interfere.
"""
if self._handle is not None:
self._handle.flush()
if fsync:
try:
os.fsync(self._handle.fileno())
except OSError:
pass
def get(self, key):
"""
Retrieve pandas object stored in file
Parameters
----------
key : object
Returns
-------
obj : same type as object stored in file
"""
group = self.get_node(key)
if group is None:
raise KeyError('No object named {key} in the file'.format(key=key))
return self._read_group(group)
def select(self, key, where=None, start=None, stop=None, columns=None,
iterator=False, chunksize=None, auto_close=False, **kwargs):
"""
Retrieve pandas object stored in file, optionally based on where
criteria
Parameters
----------
key : object
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
columns : a list of columns that if not None, will limit the return
columns
iterator : boolean, return an iterator, default False
chunksize : nrows to include in iteration, return an iterator
auto_close : boolean, should automatically close the store when
finished, default is False
Returns
-------
The selected object
"""
group = self.get_node(key)
if group is None:
raise KeyError('No object named {key} in the file'.format(key=key))
# create the storer and axes
where = _ensure_term(where, scope_level=1)
s = self._create_storer(group)
s.infer_axes()
# function to call on iteration
def func(_start, _stop, _where):
return s.read(start=_start, stop=_stop,
where=_where,
columns=columns)
# create the iterator
it = TableIterator(self, s, func, where=where, nrows=s.nrows,
start=start, stop=stop, iterator=iterator,
chunksize=chunksize, auto_close=auto_close)
return it.get_result()
def select_as_coordinates(
self, key, where=None, start=None, stop=None, **kwargs):
"""
return the selection as an Index
Parameters
----------
key : object
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
"""
where = _ensure_term(where, scope_level=1)
return self.get_storer(key).read_coordinates(where=where, start=start,
stop=stop, **kwargs)
def select_column(self, key, column, **kwargs):
"""
return a single column from the table. This is generally only useful to
select an indexable
Parameters
----------
key : object
column: the column of interest
Exceptions
----------
raises KeyError if the column is not found (or key is not a valid
store)
raises ValueError if the column can not be extracted individually (it
is part of a data block)
"""
return self.get_storer(key).read_column(column=column, **kwargs)
def select_as_multiple(self, keys, where=None, selector=None, columns=None,
start=None, stop=None, iterator=False,
chunksize=None, auto_close=False, **kwargs):
""" Retrieve pandas objects from multiple tables
Parameters
----------
keys : a list of the tables
selector : the table to apply the where criteria (defaults to keys[0]
if not supplied)
columns : the columns I want back
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
iterator : boolean, return an iterator, default False
chunksize : nrows to include in iteration, return an iterator
Exceptions
----------
raises KeyError if keys or selector is not found or keys is empty
raises TypeError if keys is not a list or tuple
raises ValueError if the tables are not ALL THE SAME DIMENSIONS
"""
# default to single select
where = _ensure_term(where, scope_level=1)
if isinstance(keys, (list, tuple)) and len(keys) == 1:
keys = keys[0]
if isinstance(keys, string_types):
return self.select(key=keys, where=where, columns=columns,
start=start, stop=stop, iterator=iterator,
chunksize=chunksize, **kwargs)
if not isinstance(keys, (list, tuple)):
raise TypeError("keys must be a list/tuple")
if not len(keys):
raise ValueError("keys must have a non-zero length")
if selector is None:
selector = keys[0]
# collect the tables
tbls = [self.get_storer(k) for k in keys]
s = self.get_storer(selector)
# validate rows
nrows = None
for t, k in itertools.chain([(s, selector)], zip(tbls, keys)):
if t is None:
raise KeyError("Invalid table [{key}]".format(key=k))
if not t.is_table:
raise TypeError(
"object [{obj}] is not a table, and cannot be used in all "
"select as multiple".format(obj=t.pathname)
)
if nrows is None:
nrows = t.nrows
elif t.nrows != nrows:
raise ValueError(
"all tables must have exactly the same nrows!")
        # axis is the concatenation axis
axis = list({t.non_index_axes[0][0] for t in tbls})[0]
def func(_start, _stop, _where):
# retrieve the objs, _where is always passed as a set of
# coordinates here
objs = [t.read(where=_where, columns=columns, start=_start,
stop=_stop, **kwargs) for t in tbls]
# concat and return
return concat(objs, axis=axis,
verify_integrity=False)._consolidate()
# create the iterator
it = TableIterator(self, s, func, where=where, nrows=nrows,
start=start, stop=stop, iterator=iterator,
chunksize=chunksize, auto_close=auto_close)
return it.get_result(coordinates=True)
def put(self, key, value, format=None, append=False, **kwargs):
"""
Store object in HDFStore
Parameters
----------
key : object
value : {Series, DataFrame, Panel}
format : 'fixed(f)|table(t)', default is 'fixed'
fixed(f) : Fixed format
Fast writing/reading. Not-appendable, nor searchable
table(t) : Table format
Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching
/ selecting subsets of the data
append : boolean, default False
This will force Table format, append the input data to the
existing.
data_columns : list of columns to create as data columns, or True to
use all columns. See
`here <http://pandas.pydata.org/pandas-docs/stable/io.html#query-via-data-columns>`__ # noqa
encoding : default None, provide an encoding for strings
dropna : boolean, default False, do not write an ALL nan row to
            the store; settable by the option 'io.hdf.dropna_table'
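        Examples
        --------
        Illustrative only; file and key names are hypothetical and assume
        ``import pandas as pd``.
        >>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
        >>> store = pd.HDFStore('put_example.h5')
        >>> store.put('df_fixed', df)                   # default fixed format
        >>> store.put('df_table', df, format='table')   # queryable table format
        >>> store.close()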
"""
if format is None:
format = get_option("io.hdf.default_format") or 'fixed'
kwargs = self._validate_format(format, kwargs)
self._write_to_group(key, value, append=append, **kwargs)
def remove(self, key, where=None, start=None, stop=None):
"""
Remove pandas object partially by specifying the where condition
Parameters
----------
key : string
Node to remove or delete rows from
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
Returns
-------
number of rows removed (or None if not a Table)
Exceptions
----------
raises KeyError if key is not a valid store
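        Examples
        --------
        Illustrative only; assumes an open store ``store`` with a table 'df'
        indexed by integers (hypothetical names).
        >>> n_removed = store.remove('df', where='index > 2')  # partial delete
        >>> store.remove('df')                                 # drop the node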
"""
where = _ensure_term(where, scope_level=1)
try:
s = self.get_storer(key)
except KeyError:
# the key is not a valid store, re-raising KeyError
raise
except Exception:
if where is not None:
raise ValueError(
"trying to remove a node with a non-None where clause!")
# we are actually trying to remove a node (with children)
s = self.get_node(key)
if s is not None:
s._f_remove(recursive=True)
return None
# remove the node
if com._all_none(where, start, stop):
s.group._f_remove(recursive=True)
# delete from the table
else:
if not s.is_table:
raise ValueError(
'can only remove with where on objects written as tables')
return s.delete(where=where, start=start, stop=stop)
def append(self, key, value, format=None, append=True, columns=None,
dropna=None, **kwargs):
"""
Append to Table in file. Node must already exist and be Table
format.
Parameters
----------
key : object
value : {Series, DataFrame, Panel}
format : 'table' is the default
table(t) : table format
Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching
/ selecting subsets of the data
append : boolean, default True, append the input data to the
existing
data_columns : list of columns, or True, default None
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See `here
<http://pandas.pydata.org/pandas-docs/stable/io.html#query-via-data-columns>`__.
min_itemsize : dict of columns that specify minimum string sizes
        nan_rep : string to use as string nan representation
chunksize : size to chunk the writing
expectedrows : expected TOTAL row size of this table
encoding : default None, provide an encoding for strings
dropna : boolean, default False, do not write an ALL nan row to
            the store; settable by the option 'io.hdf.dropna_table'
Notes
-----
Does *not* check if data being appended overlaps with existing
data in the table, so be careful
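        Examples
        --------
        Illustrative only; key and column names are hypothetical and assume
        ``import pandas as pd`` plus an open store ``store``.
        >>> df1 = pd.DataFrame({'A': [1, 2], 'B': ['x', 'y']})
        >>> df2 = pd.DataFrame({'A': [3, 4], 'B': ['z', 'w']})
        >>> store.append('df', df1, data_columns=['A'])  # creates the table
        >>> store.append('df', df2)                      # appends more rows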
"""
if columns is not None:
raise TypeError("columns is not a supported keyword in append, "
"try data_columns")
if dropna is None:
dropna = get_option("io.hdf.dropna_table")
if format is None:
format = get_option("io.hdf.default_format") or 'table'
kwargs = self._validate_format(format, kwargs)
self._write_to_group(key, value, append=append, dropna=dropna,
**kwargs)
def append_to_multiple(self, d, value, selector, data_columns=None,
axes=None, dropna=False, **kwargs):
"""
Append to multiple tables
Parameters
----------
d : a dict of table_name to table_columns, None is acceptable as the
values of one node (this will get all the remaining columns)
value : a pandas object
selector : a string that designates the indexable table; all of its
columns will be designed as data_columns, unless data_columns is
passed, in which case these are used
data_columns : list of columns to create as data columns, or True to
use all columns
dropna : if evaluates to True, drop rows from all tables if any single
row in each table has all NaN. Default False.
Notes
-----
axes parameter is currently not accepted
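        Examples
        --------
        Illustrative only; splits hypothetical columns across two tables, with
        't2' receiving all remaining columns.
        >>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4], 'C': [5, 6]})
        >>> store.append_to_multiple({'t1': ['A'], 't2': None}, df,
        ...                          selector='t1')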
"""
if axes is not None:
raise TypeError("axes is currently not accepted as a parameter to"
" append_to_multiple; you can create the "
"tables independently instead")
if not isinstance(d, dict):
raise ValueError(
"append_to_multiple must have a dictionary specified as the "
"way to split the value"
)
if selector not in d:
raise ValueError(
"append_to_multiple requires a selector that is in passed dict"
)
# figure out the splitting axis (the non_index_axis)
axis = list(set(range(value.ndim)) - set(_AXES_MAP[type(value)]))[0]
# figure out how to split the value
remain_key = None
remain_values = []
for k, v in d.items():
if v is None:
if remain_key is not None:
raise ValueError(
"append_to_multiple can only have one value in d that "
"is None"
)
remain_key = k
else:
remain_values.extend(v)
if remain_key is not None:
ordered = value.axes[axis]
ordd = ordered.difference(Index(remain_values))
ordd = sorted(ordered.get_indexer(ordd))
d[remain_key] = ordered.take(ordd)
# data_columns
if data_columns is None:
data_columns = d[selector]
# ensure rows are synchronized across the tables
if dropna:
idxs = (value[cols].dropna(how='all').index for cols in d.values())
valid_index = next(idxs)
for index in idxs:
valid_index = valid_index.intersection(index)
value = value.loc[valid_index]
# append
for k, v in d.items():
dc = data_columns if k == selector else None
# compute the val
val = value.reindex(v, axis=axis)
self.append(k, val, data_columns=dc, **kwargs)
def create_table_index(self, key, **kwargs):
""" Create a pytables index on the table
Parameters
----------
key : object (the node to index)
Exceptions
----------
raises if the node is not a table
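        Examples
        --------
        Illustrative only; assumes 'df' was appended with ``index=False`` so
        that no PyTables index exists yet (hypothetical key); extra kwargs are
        forwarded to the underlying index creation.
        >>> store.create_table_index('df')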
"""
# version requirements
_tables()
s = self.get_storer(key)
if s is None:
return
if not s.is_table:
raise TypeError(
"cannot create table index on a Fixed format store")
s.create_index(**kwargs)
def groups(self):
"""return a list of all the top-level nodes (that are not themselves a
pandas storage object)
"""
_tables()
self._check_if_open()
return [
g for g in self._handle.walk_groups()
if (not isinstance(g, _table_mod.link.Link) and
(getattr(g._v_attrs, 'pandas_type', None) or
getattr(g, 'table', None) or
(isinstance(g, _table_mod.table.Table) and
g._v_name != u'table')))
]
def walk(self, where="/"):
""" Walk the pytables group hierarchy for pandas objects
This generator will yield the group path, subgroups and pandas object
names for each group.
Any non-pandas PyTables objects that are not a group will be ignored.
The `where` group itself is listed first (preorder), then each of its
child groups (following an alphanumerical order) is also traversed,
following the same procedure.
.. versionadded:: 0.24.0
Parameters
----------
where : str, optional
Group where to start walking.
If not supplied, the root group is used.
Yields
------
path : str
Full path to a group (without trailing '/')
groups : list of str
names of the groups contained in `path`
leaves : list of str
names of the pandas objects contained in `path`
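        Examples
        --------
        Illustrative only; collects the full paths of every stored pandas
        object in a hypothetical open store ``store``.
        >>> paths = ['/'.join([path, leaf])
        ...          for path, subgroups, leaves in store.walk()
        ...          for leaf in leaves]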
"""
_tables()
self._check_if_open()
for g in self._handle.walk_groups(where):
if getattr(g._v_attrs, 'pandas_type', None) is not None:
continue
groups = []
leaves = []
for child in g._v_children.values():
pandas_type = getattr(child._v_attrs, 'pandas_type', None)
if pandas_type is None:
if isinstance(child, _table_mod.group.Group):
groups.append(child._v_name)
else:
leaves.append(child._v_name)
yield (g._v_pathname.rstrip('/'), groups, leaves)
def get_node(self, key):
""" return the node with the key or None if it does not exist """
self._check_if_open()
try:
if not key.startswith('/'):
key = '/' + key
return self._handle.get_node(self.root, key)
except _table_mod.exceptions.NoSuchNodeError:
return None
def get_storer(self, key):
""" return the storer object for a key, raise if not in the file """
group = self.get_node(key)
if group is None:
raise KeyError('No object named {key} in the file'.format(key=key))
s = self._create_storer(group)
s.infer_axes()
return s
def copy(self, file, mode='w', propindexes=True, keys=None, complib=None,
complevel=None, fletcher32=False, overwrite=True):
""" copy the existing store to a new file, upgrading in place
Parameters
----------
propindexes: restore indexes in copied file (defaults to True)
keys : list of keys to include in the copy (defaults to all)
overwrite : overwrite (remove and replace) existing nodes in the
new store (default is True)
mode, complib, complevel, fletcher32 same as in HDFStore.__init__
Returns
-------
open file handle of the new store
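        Examples
        --------
        Illustrative only; the target file name and compression settings are
        hypothetical.
        >>> new_store = store.copy('compressed_copy.h5', complib='zlib',
        ...                        complevel=5)
        >>> new_store.close()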
"""
new_store = HDFStore(
file,
mode=mode,
complib=complib,
complevel=complevel,
fletcher32=fletcher32)
if keys is None:
keys = list(self.keys())
if not isinstance(keys, (tuple, list)):
keys = [keys]
for k in keys:
s = self.get_storer(k)
if s is not None:
if k in new_store:
if overwrite:
new_store.remove(k)
data = self.select(k)
if s.is_table:
index = False
if propindexes:
index = [a.name for a in s.axes if a.is_indexed]
new_store.append(
k, data, index=index,
data_columns=getattr(s, 'data_columns', None),
encoding=s.encoding
)
else:
new_store.put(k, data, encoding=s.encoding)
return new_store
def info(self):
"""
Print detailed information on the store.
.. versionadded:: 0.21.0
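        Examples
        --------
        Illustrative only.
        >>> summary = store.info()   # multi-line description of stored keys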
"""
output = '{type}\nFile path: {path}\n'.format(
type=type(self), path=pprint_thing(self._path))
if self.is_open:
lkeys = sorted(list(self.keys()))
if len(lkeys):
keys = []
values = []
for k in lkeys:
try:
s = self.get_storer(k)
if s is not None:
keys.append(pprint_thing(s.pathname or k))
values.append(
pprint_thing(s or 'invalid_HDFStore node'))
except Exception as detail:
keys.append(k)
values.append(
"[invalid_HDFStore node: {detail}]".format(
detail=pprint_thing(detail)))
output += adjoin(12, keys, values)
else:
output += 'Empty'
else:
output += "File is CLOSED"
return output
# private methods ######
def _check_if_open(self):
if not self.is_open:
raise ClosedFileError("{0} file is not open!".format(self._path))
def _validate_format(self, format, kwargs):
""" validate / deprecate formats; return the new kwargs """
kwargs = kwargs.copy()
# validate
try:
kwargs['format'] = _FORMAT_MAP[format.lower()]
except KeyError:
raise TypeError("invalid HDFStore format specified [{0}]"
.format(format))
return kwargs
def _create_storer(self, group, format=None, value=None, append=False,
**kwargs):
""" return a suitable class to operate """
def error(t):
raise TypeError(
"cannot properly create the storer for: [{t}] [group->"
"{group},value->{value},format->{format},append->{append},"
"kwargs->{kwargs}]".format(t=t, group=group,
value=type(value), format=format,
append=append, kwargs=kwargs))
pt = _ensure_decoded(getattr(group._v_attrs, 'pandas_type', None))
tt = _ensure_decoded(getattr(group._v_attrs, 'table_type', None))
# infer the pt from the passed value
if pt is None:
if value is None:
_tables()
if (getattr(group, 'table', None) or
isinstance(group, _table_mod.table.Table)):
pt = u'frame_table'
tt = u'generic_table'
else:
                    raise TypeError(
                        "cannot create a storer if the object does not "
                        "exist and no value is passed")
else:
try:
pt = _TYPE_MAP[type(value)]
except KeyError:
error('_TYPE_MAP')
# we are actually a table
if format == 'table':
pt += u'_table'
# a storer node
if u'table' not in pt:
try:
return globals()[_STORER_MAP[pt]](self, group, **kwargs)
except KeyError:
error('_STORER_MAP')
# existing node (and must be a table)
if tt is None:
# if we are a writer, determine the tt
if value is not None:
if pt == u'series_table':
index = getattr(value, 'index', None)
if index is not None:
if index.nlevels == 1:
tt = u'appendable_series'
elif index.nlevels > 1:
tt = u'appendable_multiseries'
elif pt == u'frame_table':
index = getattr(value, 'index', None)
if index is not None:
if index.nlevels == 1:
tt = u'appendable_frame'
elif index.nlevels > 1:
tt = u'appendable_multiframe'
elif pt == u'wide_table':
tt = u'appendable_panel'
elif pt == u'ndim_table':
tt = u'appendable_ndim'
else:
                # distinguish between a frame/table
tt = u'legacy_panel'
try:
fields = group.table._v_attrs.fields
if len(fields) == 1 and fields[0] == u'value':
tt = u'legacy_frame'
except IndexError:
pass
try:
return globals()[_TABLE_MAP[tt]](self, group, **kwargs)
except KeyError:
error('_TABLE_MAP')
def _write_to_group(self, key, value, format, index=True, append=False,
complib=None, encoding=None, **kwargs):
group = self.get_node(key)
# remove the node if we are not appending
if group is not None and not append:
self._handle.remove_node(group, recursive=True)
group = None
        # we don't want to store a table node at all if our object is 0-len
        # as there are no dtypes
if getattr(value, 'empty', None) and (format == 'table' or append):
return
if group is None:
paths = key.split('/')
# recursively create the groups
path = '/'
for p in paths:
if not len(p):
continue
new_path = path
if not path.endswith('/'):
new_path += '/'
new_path += p
group = self.get_node(new_path)
if group is None:
group = self._handle.create_group(path, p)
path = new_path
s = self._create_storer(group, format, value, append=append,
encoding=encoding, **kwargs)
if append:
# raise if we are trying to append to a Fixed format,
# or a table that exists (and we are putting)
if (not s.is_table or
(s.is_table and format == 'fixed' and s.is_exists)):
raise ValueError('Can only append to Tables')
if not s.is_exists:
s.set_object_info()
else:
s.set_object_info()
if not s.is_table and complib:
raise ValueError(
'Compression not supported on Fixed format stores'
)
# write the object
s.write(obj=value, append=append, complib=complib, **kwargs)
if s.is_table and index:
s.create_index(columns=index)
def _read_group(self, group, **kwargs):
s = self._create_storer(group)
s.infer_axes()
return s.read(**kwargs)
class TableIterator(object):
""" define the iteration interface on a table
Parameters
----------
store : the reference store
s : the referred storer
func : the function to execute the query
where : the where of the query
nrows : the rows to iterate on
start : the passed start value (default is None)
stop : the passed stop value (default is None)
iterator : boolean, whether to use the default iterator
    chunksize : the passed chunking value (default is 100000)
auto_close : boolean, automatically close the store at the end of
iteration, default is False
kwargs : the passed kwargs
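    Examples
    --------
    Normally obtained indirectly through a chunked or iterated selection on an
    open store (illustrative only; key and chunk size are hypothetical).
    >>> chunks = [chunk for chunk in store.select('df', chunksize=50000)]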
"""
def __init__(self, store, s, func, where, nrows, start=None, stop=None,
iterator=False, chunksize=None, auto_close=False):
self.store = store
self.s = s
self.func = func
self.where = where
# set start/stop if they are not set if we are a table
if self.s.is_table:
if nrows is None:
nrows = 0
if start is None:
start = 0
if stop is None:
stop = nrows
stop = min(nrows, stop)
self.nrows = nrows
self.start = start
self.stop = stop
self.coordinates = None
if iterator or chunksize is not None:
if chunksize is None:
chunksize = 100000
self.chunksize = int(chunksize)
else:
self.chunksize = None
self.auto_close = auto_close
def __iter__(self):
# iterate
current = self.start
while current < self.stop:
stop = min(current + self.chunksize, self.stop)
value = self.func(None, None, self.coordinates[current:stop])
current = stop
if value is None or not len(value):
continue
yield value
self.close()
def close(self):
if self.auto_close:
self.store.close()
def get_result(self, coordinates=False):
# return the actual iterator
if self.chunksize is not None:
if not self.s.is_table:
raise TypeError(
"can only use an iterator or chunksize on a table")
self.coordinates = self.s.read_coordinates(where=self.where)
return self
        # if specified, read via coordinates (necessary for multiple selections)
if coordinates:
where = self.s.read_coordinates(where=self.where, start=self.start,
stop=self.stop)
else:
where = self.where
# directly return the result
results = self.func(self.start, self.stop, where)
self.close()
return results
class IndexCol(StringMixin):
""" an index column description class
Parameters
----------
axis : axis which I reference
values : the ndarray like converted values
kind : a string description of this type
typ : the pytables type
pos : the position in the pytables
"""
is_an_indexable = True
is_data_indexable = True
_info_fields = ['freq', 'tz', 'index_name']
def __init__(self, values=None, kind=None, typ=None, cname=None,
itemsize=None, name=None, axis=None, kind_attr=None,
pos=None, freq=None, tz=None, index_name=None, **kwargs):
self.values = values
self.kind = kind
self.typ = typ
self.itemsize = itemsize
self.name = name
self.cname = cname
self.kind_attr = kind_attr
self.axis = axis
self.pos = pos
self.freq = freq
self.tz = tz
self.index_name = index_name
self.table = None
self.meta = None
self.metadata = None
if name is not None:
self.set_name(name, kind_attr)
if pos is not None:
self.set_pos(pos)
def set_name(self, name, kind_attr=None):
""" set the name of this indexer """
self.name = name
self.kind_attr = kind_attr or "{name}_kind".format(name=name)
if self.cname is None:
self.cname = name
return self
def set_axis(self, axis):
""" set the axis over which I index """
self.axis = axis
return self
def set_pos(self, pos):
""" set the position of this column in the Table """
self.pos = pos
if pos is not None and self.typ is not None:
self.typ._v_pos = pos
return self
def set_table(self, table):
self.table = table
return self
def __unicode__(self):
temp = tuple(
map(pprint_thing,
(self.name,
self.cname,
self.axis,
self.pos,
self.kind)))
return ','.join(("{key}->{value}".format(key=key, value=value)
for key, value in zip(
['name', 'cname', 'axis', 'pos', 'kind'], temp)))
def __eq__(self, other):
""" compare 2 col items """
return all(getattr(self, a, None) == getattr(other, a, None)
for a in ['name', 'cname', 'axis', 'pos'])
def __ne__(self, other):
return not self.__eq__(other)
@property
def is_indexed(self):
""" return whether I am an indexed column """
try:
return getattr(self.table.cols, self.cname).is_indexed
except AttributeError:
            return False
def copy(self):
new_self = copy.copy(self)
return new_self
def infer(self, handler):
"""infer this column from the table: create and return a new object"""
table = handler.table
new_self = self.copy()
new_self.set_table(table)
new_self.get_attr()
new_self.read_metadata(handler)
return new_self
def convert(self, values, nan_rep, encoding, errors):
""" set the values from this selection: take = take ownership """
# values is a recarray
if values.dtype.fields is not None:
values = values[self.cname]
values = _maybe_convert(values, self.kind, encoding, errors)
kwargs = dict()
if self.freq is not None:
kwargs['freq'] = _ensure_decoded(self.freq)
if self.index_name is not None:
kwargs['name'] = _ensure_decoded(self.index_name)
# making an Index instance could throw a number of different errors
try:
self.values = Index(values, **kwargs)
except Exception: # noqa: E722
            # if the output freq is different than what we recorded,
# it should be None (see also 'doc example part 2')
if 'freq' in kwargs:
kwargs['freq'] = None
self.values = Index(values, **kwargs)
self.values = _set_tz(self.values, self.tz)
return self
def take_data(self):
""" return the values & release the memory """
self.values, values = None, self.values
return values
@property
def attrs(self):
return self.table._v_attrs
@property
def description(self):
return self.table.description
@property
def col(self):
""" return my current col description """
return getattr(self.description, self.cname, None)
@property
def cvalues(self):
""" return my cython values """
return self.values
def __iter__(self):
return iter(self.values)
def maybe_set_size(self, min_itemsize=None):
""" maybe set a string col itemsize:
            min_itemsize can be an integer or a dict with this column's name
with an integer size """
if _ensure_decoded(self.kind) == u'string':
if isinstance(min_itemsize, dict):
min_itemsize = min_itemsize.get(self.name)
if min_itemsize is not None and self.typ.itemsize < min_itemsize:
self.typ = _tables(
).StringCol(itemsize=min_itemsize, pos=self.pos)
def validate(self, handler, append):
self.validate_names()
def validate_names(self):
pass
def validate_and_set(self, handler, append):
self.set_table(handler.table)
self.validate_col()
self.validate_attr(append)
self.validate_metadata(handler)
self.write_metadata(handler)
self.set_attr()
    def validate_col(self, itemsize=None):
        """ validate this column: return the existing column's itemsize, if any """
# validate this column for string truncation (or reset to the max size)
if _ensure_decoded(self.kind) == u'string':
c = self.col
if c is not None:
if itemsize is None:
itemsize = self.itemsize
if c.itemsize < itemsize:
raise ValueError(
"Trying to store a string with len [{itemsize}] in "
"[{cname}] column but\nthis column has a limit of "
"[{c_itemsize}]!\nConsider using min_itemsize to "
"preset the sizes on these columns".format(
itemsize=itemsize, cname=self.cname,
c_itemsize=c.itemsize))
return c.itemsize
return None
def validate_attr(self, append):
# check for backwards incompatibility
if append:
existing_kind = getattr(self.attrs, self.kind_attr, None)
if existing_kind is not None and existing_kind != self.kind:
raise TypeError(
"incompatible kind in col [{existing} - "
"{self_kind}]".format(
existing=existing_kind, self_kind=self.kind))
def update_info(self, info):
""" set/update the info for this indexable with the key/value
        if there is a conflict, raise/warn as needed """
for key in self._info_fields:
value = getattr(self, key, None)
idx = _get_info(info, self.name)
existing_value = idx.get(key)
if key in idx and value is not None and existing_value != value:
# frequency/name just warn
if key in ['freq', 'index_name']:
ws = attribute_conflict_doc % (key, existing_value, value)
warnings.warn(ws, AttributeConflictWarning, stacklevel=6)
# reset
idx[key] = None
setattr(self, key, None)
else:
raise ValueError(
"invalid info for [{name}] for [{key}], "
"existing_value [{existing_value}] conflicts with "
"new value [{value}]".format(
name=self.name, key=key,
existing_value=existing_value, value=value))
else:
if value is not None or existing_value is not None:
idx[key] = value
return self
def set_info(self, info):
""" set my state from the passed info """
idx = info.get(self.name)
if idx is not None:
self.__dict__.update(idx)
def get_attr(self):
""" set the kind for this column """
self.kind = getattr(self.attrs, self.kind_attr, None)
def set_attr(self):
""" set the kind for this column """
setattr(self.attrs, self.kind_attr, self.kind)
    def read_metadata(self, handler):
        """ retrieve the metadata for this column """
self.metadata = handler.read_metadata(self.cname)
def validate_metadata(self, handler):
""" validate that kind=category does not change the categories """
if self.meta == 'category':
new_metadata = self.metadata
cur_metadata = handler.read_metadata(self.cname)
if (new_metadata is not None and cur_metadata is not None and
not array_equivalent(new_metadata, cur_metadata)):
raise ValueError("cannot append a categorical with "
"different categories to the existing")
def write_metadata(self, handler):
""" set the meta data """
if self.metadata is not None:
handler.write_metadata(self.cname, self.metadata)
class GenericIndexCol(IndexCol):
""" an index which is not represented in the data of the table """
@property
def is_indexed(self):
return False
def convert(self, values, nan_rep, encoding, errors):
""" set the values from this selection: take = take ownership """
self.values = Int64Index(np.arange(self.table.nrows))
return self
def get_attr(self):
pass
def set_attr(self):
pass
class DataCol(IndexCol):
""" a data holding column, by definition this is not indexable
Parameters
----------
data : the actual data
cname : the column name in the table to hold the data (typically
values)
meta : a string description of the metadata
metadata : the actual metadata
"""
is_an_indexable = False
is_data_indexable = False
_info_fields = ['tz', 'ordered']
@classmethod
def create_for_block(
cls, i=None, name=None, cname=None, version=None, **kwargs):
""" return a new datacol with the block i """
if cname is None:
cname = name or 'values_block_{idx}'.format(idx=i)
if name is None:
name = cname
        # prior to 0.10.1, we named values blocks like: values_block_0 and the
# name values_0
try:
if version[0] == 0 and version[1] <= 10 and version[2] == 0:
m = re.search(r"values_block_(\d+)", name)
if m:
name = "values_{group}".format(group=m.groups()[0])
except IndexError:
pass
return cls(name=name, cname=cname, **kwargs)
def __init__(self, values=None, kind=None, typ=None,
cname=None, data=None, meta=None, metadata=None,
block=None, **kwargs):
super(DataCol, self).__init__(values=values, kind=kind, typ=typ,
cname=cname, **kwargs)
self.dtype = None
self.dtype_attr = u'{name}_dtype'.format(name=self.name)
self.meta = meta
self.meta_attr = u'{name}_meta'.format(name=self.name)
self.set_data(data)
self.set_metadata(metadata)
def __unicode__(self):
temp = tuple(
map(pprint_thing,
(self.name,
self.cname,
self.dtype,
self.kind,
self.shape)))
return ','.join(("{key}->{value}".format(key=key, value=value)
for key, value in zip(
['name', 'cname', 'dtype', 'kind', 'shape'], temp)))
def __eq__(self, other):
""" compare 2 col items """
return all(getattr(self, a, None) == getattr(other, a, None)
for a in ['name', 'cname', 'dtype', 'pos'])
def set_data(self, data, dtype=None):
self.data = data
if data is not None:
if dtype is not None:
self.dtype = dtype
self.set_kind()
elif self.dtype is None:
self.dtype = data.dtype.name
self.set_kind()
def take_data(self):
""" return the data & release the memory """
self.data, data = None, self.data
return data
def set_metadata(self, metadata):
""" record the metadata """
if metadata is not None:
metadata = np.array(metadata, copy=False).ravel()
self.metadata = metadata
def set_kind(self):
# set my kind if we can
if self.dtype is not None:
dtype = _ensure_decoded(self.dtype)
if dtype.startswith(u'string') or dtype.startswith(u'bytes'):
self.kind = 'string'
elif dtype.startswith(u'float'):
self.kind = 'float'
elif dtype.startswith(u'complex'):
self.kind = 'complex'
elif dtype.startswith(u'int') or dtype.startswith(u'uint'):
self.kind = 'integer'
elif dtype.startswith(u'date'):
self.kind = 'datetime'
elif dtype.startswith(u'timedelta'):
self.kind = 'timedelta'
elif dtype.startswith(u'bool'):
self.kind = 'bool'
else:
raise AssertionError(
"cannot interpret dtype of [{dtype}] in [{obj}]".format(
dtype=dtype, obj=self))
# set my typ if we need
if self.typ is None:
self.typ = getattr(self.description, self.cname, None)
def set_atom(self, block, block_items, existing_col, min_itemsize,
nan_rep, info, encoding=None, errors='strict'):
""" create and setup my atom from the block b """
self.values = list(block_items)
# short-cut certain block types
if block.is_categorical:
return self.set_atom_categorical(block, items=block_items,
info=info)
elif block.is_datetimetz:
return self.set_atom_datetime64tz(block, info=info)
elif block.is_datetime:
return self.set_atom_datetime64(block)
elif block.is_timedelta:
return self.set_atom_timedelta64(block)
elif block.is_complex:
return self.set_atom_complex(block)
dtype = block.dtype.name
inferred_type = lib.infer_dtype(block.values, skipna=False)
if inferred_type == 'date':
raise TypeError(
"[date] is not implemented as a table column")
elif inferred_type == 'datetime':
# after 8260
            # this would only be hit for a multi-timezone dtype
# which is an error
raise TypeError(
"too many timezones in this block, create separate "
"data columns"
)
elif inferred_type == 'unicode':
raise TypeError(
"[unicode] is not implemented as a table column")
        # this is basically a catchall; if, say, a datetime64 has nans then it
        # will end up here ###
elif inferred_type == 'string' or dtype == 'object':
self.set_atom_string(
block, block_items,
existing_col,
min_itemsize,
nan_rep,
encoding,
errors)
# set as a data block
else:
self.set_atom_data(block)
def get_atom_string(self, block, itemsize):
return _tables().StringCol(itemsize=itemsize, shape=block.shape[0])
def set_atom_string(self, block, block_items, existing_col, min_itemsize,
nan_rep, encoding, errors):
# fill nan items with myself, don't disturb the blocks by
# trying to downcast
block = block.fillna(nan_rep, downcast=False)
if isinstance(block, list):
block = block[0]
data = block.values
# see if we have a valid string type
inferred_type = lib.infer_dtype(data.ravel(), skipna=False)
if inferred_type != 'string':
# we cannot serialize this data, so report an exception on a column
# by column basis
for i, item in enumerate(block_items):
col = block.iget(i)
inferred_type = lib.infer_dtype(col.ravel(), skipna=False)
if inferred_type != 'string':
raise TypeError(
"Cannot serialize the column [{item}] because\n"
"its data contents are [{type}] object dtype".format(
item=item, type=inferred_type)
)
# itemsize is the maximum length of a string (along any dimension)
data_converted = _convert_string_array(data, encoding, errors)
itemsize = data_converted.itemsize
# specified min_itemsize?
if isinstance(min_itemsize, dict):
min_itemsize = int(min_itemsize.get(
self.name) or min_itemsize.get('values') or 0)
itemsize = max(min_itemsize or 0, itemsize)
# check for column in the values conflicts
if existing_col is not None:
eci = existing_col.validate_col(itemsize)
if eci > itemsize:
itemsize = eci
self.itemsize = itemsize
self.kind = 'string'
self.typ = self.get_atom_string(block, itemsize)
self.set_data(data_converted.astype(
'|S{size}'.format(size=itemsize), copy=False))
def get_atom_coltype(self, kind=None):
""" return the PyTables column class for this column """
if kind is None:
kind = self.kind
if self.kind.startswith('uint'):
col_name = "UInt{name}Col".format(name=kind[4:])
else:
col_name = "{name}Col".format(name=kind.capitalize())
return getattr(_tables(), col_name)
def get_atom_data(self, block, kind=None):
return self.get_atom_coltype(kind=kind)(shape=block.shape[0])
def set_atom_complex(self, block):
self.kind = block.dtype.name
itemsize = int(self.kind.split('complex')[-1]) // 8
self.typ = _tables().ComplexCol(
itemsize=itemsize, shape=block.shape[0])
self.set_data(block.values.astype(self.typ.type, copy=False))
def set_atom_data(self, block):
self.kind = block.dtype.name
self.typ = self.get_atom_data(block)
self.set_data(block.values.astype(self.typ.type, copy=False))
def set_atom_categorical(self, block, items, info=None, values=None):
# currently only supports a 1-D categorical
# in a 1-D block
values = block.values
codes = values.codes
self.kind = 'integer'
self.dtype = codes.dtype.name
if values.ndim > 1:
raise NotImplementedError("only support 1-d categoricals")
if len(items) > 1:
raise NotImplementedError("only support single block categoricals")
# write the codes; must be in a block shape
self.ordered = values.ordered
self.typ = self.get_atom_data(block, kind=codes.dtype.name)
self.set_data(_block_shape(codes))
# write the categories
self.meta = 'category'
self.set_metadata(block.values.categories)
# update the info
self.update_info(info)
def get_atom_datetime64(self, block):
return _tables().Int64Col(shape=block.shape[0])
def set_atom_datetime64(self, block, values=None):
self.kind = 'datetime64'
self.typ = self.get_atom_datetime64(block)
if values is None:
values = block.values.view('i8')
self.set_data(values, 'datetime64')
def set_atom_datetime64tz(self, block, info, values=None):
if values is None:
values = block.values
# convert this column to i8 in UTC, and save the tz
values = values.asi8.reshape(block.shape)
# store a converted timezone
self.tz = _get_tz(block.values.tz)
self.update_info(info)
self.kind = 'datetime64'
self.typ = self.get_atom_datetime64(block)
self.set_data(values, 'datetime64')
def get_atom_timedelta64(self, block):
return _tables().Int64Col(shape=block.shape[0])
def set_atom_timedelta64(self, block, values=None):
self.kind = 'timedelta64'
self.typ = self.get_atom_timedelta64(block)
if values is None:
values = block.values.view('i8')
self.set_data(values, 'timedelta64')
@property
def shape(self):
return getattr(self.data, 'shape', None)
@property
def cvalues(self):
""" return my cython values """
return self.data
def validate_attr(self, append):
"""validate that we have the same order as the existing & same dtype"""
if append:
existing_fields = getattr(self.attrs, self.kind_attr, None)
if (existing_fields is not None and
existing_fields != list(self.values)):
raise ValueError("appended items do not match existing items"
" in table!")
existing_dtype = getattr(self.attrs, self.dtype_attr, None)
if (existing_dtype is not None and
existing_dtype != self.dtype):
                raise ValueError("appended items dtype does not match existing "
"items dtype in table!")
def convert(self, values, nan_rep, encoding, errors):
"""set the data from this selection (and convert to the correct dtype
if we can)
"""
# values is a recarray
if values.dtype.fields is not None:
values = values[self.cname]
self.set_data(values)
# use the meta if needed
meta = _ensure_decoded(self.meta)
# convert to the correct dtype
if self.dtype is not None:
dtype = _ensure_decoded(self.dtype)
# reverse converts
if dtype == u'datetime64':
# recreate with tz if indicated
self.data = _set_tz(self.data, self.tz, coerce=True)
elif dtype == u'timedelta64':
self.data = np.asarray(self.data, dtype='m8[ns]')
elif dtype == u'date':
try:
self.data = np.asarray(
[date.fromordinal(v) for v in self.data], dtype=object)
except ValueError:
self.data = np.asarray(
[date.fromtimestamp(v) for v in self.data],
dtype=object)
elif dtype == u'datetime':
self.data = np.asarray(
[datetime.fromtimestamp(v) for v in self.data],
dtype=object)
elif meta == u'category':
# we have a categorical
categories = self.metadata
codes = self.data.ravel()
# if we have stored a NaN in the categories
# then strip it; in theory we could have BOTH
# -1s in the codes and nulls :<
if categories is None:
# Handle case of NaN-only categorical columns in which case
# the categories are an empty array; when this is stored,
# pytables cannot write a zero-len array, so on readback
# the categories would be None and `read_hdf()` would fail.
categories = Index([], dtype=np.float64)
else:
mask = isna(categories)
if mask.any():
categories = categories[~mask]
codes[codes != -1] -= mask.astype(int).cumsum().values
self.data = Categorical.from_codes(codes,
categories=categories,
ordered=self.ordered)
else:
try:
self.data = self.data.astype(dtype, copy=False)
except TypeError:
self.data = self.data.astype('O', copy=False)
# convert nans / decode
if _ensure_decoded(self.kind) == u'string':
self.data = _unconvert_string_array(
self.data, nan_rep=nan_rep, encoding=encoding, errors=errors)
return self
def get_attr(self):
""" get the data for this column """
self.values = getattr(self.attrs, self.kind_attr, None)
self.dtype = getattr(self.attrs, self.dtype_attr, None)
self.meta = getattr(self.attrs, self.meta_attr, None)
self.set_kind()
def set_attr(self):
""" set the data for this column """
setattr(self.attrs, self.kind_attr, self.values)
setattr(self.attrs, self.meta_attr, self.meta)
if self.dtype is not None:
setattr(self.attrs, self.dtype_attr, self.dtype)
class DataIndexableCol(DataCol):
""" represent a data column that can be indexed """
is_data_indexable = True
def validate_names(self):
if not Index(self.values).is_object():
raise ValueError("cannot have non-object label DataIndexableCol")
def get_atom_string(self, block, itemsize):
return _tables().StringCol(itemsize=itemsize)
def get_atom_data(self, block, kind=None):
return self.get_atom_coltype(kind=kind)()
def get_atom_datetime64(self, block):
return _tables().Int64Col()
def get_atom_timedelta64(self, block):
return _tables().Int64Col()
class GenericDataIndexableCol(DataIndexableCol):
""" represent a generic pytables data column """
def get_attr(self):
pass
class Fixed(StringMixin):
""" represent an object in my store
facilitate read/write of various types of objects
this is an abstract base class
Parameters
----------
parent : my parent HDFStore
group : the group node where the table resides
"""
pandas_kind = None
obj_type = None
ndim = None
is_table = False
def __init__(self, parent, group, encoding=None, errors='strict',
**kwargs):
self.parent = parent
self.group = group
self.encoding = _ensure_encoding(encoding)
self.errors = errors
self.set_version()
@property
def is_old_version(self):
return (self.version[0] <= 0 and self.version[1] <= 10 and
self.version[2] < 1)
def set_version(self):
""" compute and set our version """
version = _ensure_decoded(
getattr(self.group._v_attrs, 'pandas_version', None))
try:
self.version = tuple(int(x) for x in version.split('.'))
if len(self.version) == 2:
self.version = self.version + (0,)
except AttributeError:
self.version = (0, 0, 0)
@property
def pandas_type(self):
return _ensure_decoded(getattr(self.group._v_attrs,
'pandas_type', None))
@property
def format_type(self):
return 'fixed'
def __unicode__(self):
""" return a pretty representation of myself """
self.infer_axes()
s = self.shape
if s is not None:
if isinstance(s, (list, tuple)):
s = "[{shape}]".format(
shape=','.join(pprint_thing(x) for x in s))
return "{type:12.12} (shape->{shape})".format(
type=self.pandas_type, shape=s)
return self.pandas_type
def set_object_info(self):
""" set my pandas type & version """
self.attrs.pandas_type = str(self.pandas_kind)
self.attrs.pandas_version = str(_version)
self.set_version()
def copy(self):
new_self = copy.copy(self)
return new_self
@property
def storage_obj_type(self):
return self.obj_type
@property
def shape(self):
return self.nrows
@property
def pathname(self):
return self.group._v_pathname
@property
def _handle(self):
return self.parent._handle
@property
def _filters(self):
return self.parent._filters
@property
def _complevel(self):
return self.parent._complevel
@property
def _fletcher32(self):
return self.parent._fletcher32
@property
def _complib(self):
return self.parent._complib
@property
def attrs(self):
return self.group._v_attrs
def set_attrs(self):
""" set our object attributes """
pass
def get_attrs(self):
""" get our object attributes """
pass
@property
def storable(self):
""" return my storable """
return self.group
@property
def is_exists(self):
return False
@property
def nrows(self):
return getattr(self.storable, 'nrows', None)
def validate(self, other):
""" validate against an existing storable """
if other is None:
return
return True
def validate_version(self, where=None):
""" are we trying to operate on an old version? """
return True
def infer_axes(self):
""" infer the axes of my storer
return a boolean indicating if we have a valid storer or not """
s = self.storable
if s is None:
return False
self.get_attrs()
return True
def read(self, **kwargs):
raise NotImplementedError(
"cannot read on an abstract storer: subclasses should implement")
def write(self, **kwargs):
        raise NotImplementedError(
            "cannot write on an abstract storer: subclasses should implement")
def delete(self, where=None, start=None, stop=None, **kwargs):
"""
support fully deleting the node in its entirety (only) - where
specification must be None
"""
if com._all_none(where, start, stop):
self._handle.remove_node(self.group, recursive=True)
return None
raise TypeError("cannot delete on an abstract storer")
class GenericFixed(Fixed):
""" a generified fixed version """
_index_type_map = {DatetimeIndex: 'datetime', PeriodIndex: 'period'}
_reverse_index_map = {v: k for k, v in compat.iteritems(_index_type_map)}
attributes = []
    # indexer helpers
def _class_to_alias(self, cls):
return self._index_type_map.get(cls, '')
def _alias_to_class(self, alias):
if isinstance(alias, type): # pragma: no cover
# compat: for a short period of time master stored types
return alias
return self._reverse_index_map.get(alias, Index)
def _get_index_factory(self, klass):
if klass == DatetimeIndex:
def f(values, freq=None, tz=None):
# data are already in UTC, localize and convert if tz present
result = DatetimeIndex._simple_new(values.values, name=None,
freq=freq)
if tz is not None:
result = result.tz_localize('UTC').tz_convert(tz)
return result
return f
elif klass == PeriodIndex:
def f(values, freq=None, tz=None):
return PeriodIndex._simple_new(values, name=None, freq=freq)
return f
return klass
def validate_read(self, kwargs):
"""
        remove table keywords from kwargs and return;
        raise if any keywords are passed which are not None
"""
kwargs = copy.copy(kwargs)
columns = kwargs.pop('columns', None)
if columns is not None:
raise TypeError("cannot pass a column specification when reading "
"a Fixed format store. this store must be "
"selected in its entirety")
where = kwargs.pop('where', None)
if where is not None:
raise TypeError("cannot pass a where specification when reading "
"from a Fixed format store. this store must be "
"selected in its entirety")
return kwargs
@property
def is_exists(self):
return True
def set_attrs(self):
""" set our object attributes """
self.attrs.encoding = self.encoding
self.attrs.errors = self.errors
def get_attrs(self):
""" retrieve our attributes """
self.encoding = _ensure_encoding(getattr(self.attrs, 'encoding', None))
self.errors = _ensure_decoded(getattr(self.attrs, 'errors', 'strict'))
for n in self.attributes:
setattr(self, n, _ensure_decoded(getattr(self.attrs, n, None)))
def write(self, obj, **kwargs):
self.set_attrs()
    def read_array(self, key, start=None, stop=None):
        """ read an array for the specified node (off of group) """
import tables
node = getattr(self.group, key)
attrs = node._v_attrs
transposed = getattr(attrs, 'transposed', False)
if isinstance(node, tables.VLArray):
ret = node[0][start:stop]
else:
dtype = getattr(attrs, 'value_type', None)
shape = getattr(attrs, 'shape', None)
if shape is not None:
# length 0 axis
ret = np.empty(shape, dtype=dtype)
else:
ret = node[start:stop]
if dtype == u'datetime64':
# reconstruct a timezone if indicated
ret = _set_tz(ret, getattr(attrs, 'tz', None), coerce=True)
elif dtype == u'timedelta64':
ret = np.asarray(ret, dtype='m8[ns]')
if transposed:
return ret.T
else:
return ret
def read_index(self, key, **kwargs):
variety = _ensure_decoded(
getattr(self.attrs, '{key}_variety'.format(key=key)))
if variety == u'multi':
return self.read_multi_index(key, **kwargs)
elif variety == u'block':
return self.read_block_index(key, **kwargs)
elif variety == u'sparseint':
return self.read_sparse_intindex(key, **kwargs)
elif variety == u'regular':
_, index = self.read_index_node(getattr(self.group, key), **kwargs)
return index
else: # pragma: no cover
raise TypeError(
'unrecognized index variety: {variety}'.format(
variety=variety))
def write_index(self, key, index):
if isinstance(index, MultiIndex):
setattr(self.attrs, '{key}_variety'.format(key=key), 'multi')
self.write_multi_index(key, index)
elif isinstance(index, BlockIndex):
setattr(self.attrs, '{key}_variety'.format(key=key), 'block')
self.write_block_index(key, index)
elif isinstance(index, IntIndex):
setattr(self.attrs, '{key}_variety'.format(key=key), 'sparseint')
self.write_sparse_intindex(key, index)
else:
setattr(self.attrs, '{key}_variety'.format(key=key), 'regular')
converted = _convert_index(index, self.encoding, self.errors,
self.format_type).set_name('index')
self.write_array(key, converted.values)
node = getattr(self.group, key)
node._v_attrs.kind = converted.kind
node._v_attrs.name = index.name
if isinstance(index, (DatetimeIndex, PeriodIndex)):
node._v_attrs.index_class = self._class_to_alias(type(index))
if hasattr(index, 'freq'):
node._v_attrs.freq = index.freq
if hasattr(index, 'tz') and index.tz is not None:
node._v_attrs.tz = _get_tz(index.tz)
def write_block_index(self, key, index):
self.write_array('{key}_blocs'.format(key=key), index.blocs)
self.write_array('{key}_blengths'.format(key=key), index.blengths)
setattr(self.attrs, '{key}_length'.format(key=key), index.length)
def read_block_index(self, key, **kwargs):
length = getattr(self.attrs, '{key}_length'.format(key=key))
blocs = self.read_array('{key}_blocs'.format(key=key), **kwargs)
blengths = self.read_array('{key}_blengths'.format(key=key), **kwargs)
return BlockIndex(length, blocs, blengths)
def write_sparse_intindex(self, key, index):
self.write_array('{key}_indices'.format(key=key), index.indices)
setattr(self.attrs, '{key}_length'.format(key=key), index.length)
def read_sparse_intindex(self, key, **kwargs):
length = getattr(self.attrs, '{key}_length'.format(key=key))
indices = self.read_array('{key}_indices'.format(key=key), **kwargs)
return IntIndex(length, indices)
def write_multi_index(self, key, index):
setattr(self.attrs, '{key}_nlevels'.format(key=key), index.nlevels)
for i, (lev, level_codes, name) in enumerate(zip(index.levels,
index.codes,
index.names)):
# write the level
level_key = '{key}_level{idx}'.format(key=key, idx=i)
conv_level = _convert_index(lev, self.encoding, self.errors,
self.format_type).set_name(level_key)
self.write_array(level_key, conv_level.values)
node = getattr(self.group, level_key)
node._v_attrs.kind = conv_level.kind
node._v_attrs.name = name
# write the name
setattr(node._v_attrs, '{key}_name{name}'.format(
key=key, name=name), name)
# write the labels
label_key = '{key}_label{idx}'.format(key=key, idx=i)
self.write_array(label_key, level_codes)
def read_multi_index(self, key, **kwargs):
nlevels = getattr(self.attrs, '{key}_nlevels'.format(key=key))
levels = []
codes = []
names = []
for i in range(nlevels):
level_key = '{key}_level{idx}'.format(key=key, idx=i)
name, lev = self.read_index_node(getattr(self.group, level_key),
**kwargs)
levels.append(lev)
names.append(name)
label_key = '{key}_label{idx}'.format(key=key, idx=i)
level_codes = self.read_array(label_key, **kwargs)
codes.append(level_codes)
return MultiIndex(levels=levels, codes=codes, names=names,
verify_integrity=True)
def read_index_node(self, node, start=None, stop=None):
data = node[start:stop]
# If the index was an empty array write_array_empty() will
        # have written a sentinel. Here we replace it with the original.
if ('shape' in node._v_attrs and
self._is_empty_array(getattr(node._v_attrs, 'shape'))):
data = np.empty(getattr(node._v_attrs, 'shape'),
dtype=getattr(node._v_attrs, 'value_type'))
kind = _ensure_decoded(node._v_attrs.kind)
name = None
if 'name' in node._v_attrs:
name = _ensure_str(node._v_attrs.name)
name = _ensure_decoded(name)
index_class = self._alias_to_class(_ensure_decoded(
getattr(node._v_attrs, 'index_class', '')))
factory = self._get_index_factory(index_class)
kwargs = {}
if u'freq' in node._v_attrs:
kwargs['freq'] = node._v_attrs['freq']
if u'tz' in node._v_attrs:
kwargs['tz'] = node._v_attrs['tz']
if kind in (u'date', u'datetime'):
index = factory(_unconvert_index(data, kind,
encoding=self.encoding,
errors=self.errors),
dtype=object, **kwargs)
else:
index = factory(_unconvert_index(data, kind,
encoding=self.encoding,
errors=self.errors), **kwargs)
index.name = name
return name, index
def write_array_empty(self, key, value):
""" write a 0-len array """
# ugly hack for length 0 axes
arr = np.empty((1,) * value.ndim)
self._handle.create_array(self.group, key, arr)
getattr(self.group, key)._v_attrs.value_type = str(value.dtype)
getattr(self.group, key)._v_attrs.shape = value.shape
def _is_empty_array(self, shape):
"""Returns true if any axis is zero length."""
return any(x == 0 for x in shape)
def write_array(self, key, value, items=None):
if key in self.group:
self._handle.remove_node(self.group, key)
# Transform needed to interface with pytables row/col notation
empty_array = self._is_empty_array(value.shape)
transposed = False
if is_categorical_dtype(value):
raise NotImplementedError('Cannot store a category dtype in '
'a HDF5 dataset that uses format='
'"fixed". Use format="table".')
if not empty_array:
if hasattr(value, 'T'):
# ExtensionArrays (1d) may not have transpose.
value = value.T
transposed = True
if self._filters is not None:
atom = None
try:
# get the atom for this datatype
atom = _tables().Atom.from_dtype(value.dtype)
except ValueError:
pass
if atom is not None:
# create an empty chunked array and fill it from value
if not empty_array:
ca = self._handle.create_carray(self.group, key, atom,
value.shape,
filters=self._filters)
ca[:] = value
getattr(self.group, key)._v_attrs.transposed = transposed
else:
self.write_array_empty(key, value)
return
if value.dtype.type == np.object_:
# infer the type, warn if we have a non-string type here (for
# performance)
inferred_type = lib.infer_dtype(value.ravel(), skipna=False)
if empty_array:
pass
elif inferred_type == 'string':
pass
else:
try:
items = list(items)
except TypeError:
pass
ws = performance_doc % (inferred_type, key, items)
warnings.warn(ws, PerformanceWarning, stacklevel=7)
vlarr = self._handle.create_vlarray(self.group, key,
_tables().ObjectAtom())
vlarr.append(value)
else:
if empty_array:
self.write_array_empty(key, value)
else:
if is_datetime64_dtype(value.dtype):
self._handle.create_array(
self.group, key, value.view('i8'))
getattr(
self.group, key)._v_attrs.value_type = 'datetime64'
elif is_datetime64tz_dtype(value.dtype):
# store as UTC
# with a zone
self._handle.create_array(self.group, key,
value.asi8)
node = getattr(self.group, key)
node._v_attrs.tz = _get_tz(value.tz)
node._v_attrs.value_type = 'datetime64'
elif is_timedelta64_dtype(value.dtype):
self._handle.create_array(
self.group, key, value.view('i8'))
getattr(
self.group, key)._v_attrs.value_type = 'timedelta64'
else:
self._handle.create_array(self.group, key, value)
getattr(self.group, key)._v_attrs.transposed = transposed
class LegacyFixed(GenericFixed):
def read_index_legacy(self, key, start=None, stop=None):
node = getattr(self.group, key)
data = node[start:stop]
kind = node._v_attrs.kind
return _unconvert_index_legacy(data, kind, encoding=self.encoding,
errors=self.errors)
class LegacySeriesFixed(LegacyFixed):
def read(self, **kwargs):
kwargs = self.validate_read(kwargs)
index = self.read_index_legacy('index')
values = self.read_array('values')
return Series(values, index=index)
class LegacyFrameFixed(LegacyFixed):
def read(self, **kwargs):
kwargs = self.validate_read(kwargs)
index = self.read_index_legacy('index')
columns = self.read_index_legacy('columns')
values = self.read_array('values')
return DataFrame(values, index=index, columns=columns)
class SeriesFixed(GenericFixed):
pandas_kind = u'series'
attributes = ['name']
@property
def shape(self):
try:
return len(getattr(self.group, 'values')),
except (TypeError, AttributeError):
return None
def read(self, **kwargs):
kwargs = self.validate_read(kwargs)
index = self.read_index('index', **kwargs)
values = self.read_array('values', **kwargs)
return Series(values, index=index, name=self.name)
def write(self, obj, **kwargs):
super(SeriesFixed, self).write(obj, **kwargs)
self.write_index('index', obj.index)
self.write_array('values', obj.values)
self.attrs.name = obj.name
class SparseFixed(GenericFixed):
def validate_read(self, kwargs):
"""
we don't support start, stop kwds in Sparse
"""
kwargs = super(SparseFixed, self).validate_read(kwargs)
if 'start' in kwargs or 'stop' in kwargs:
raise NotImplementedError("start and/or stop are not supported "
"in fixed Sparse reading")
return kwargs
class SparseSeriesFixed(SparseFixed):
pandas_kind = u'sparse_series'
attributes = ['name', 'fill_value', 'kind']
def read(self, **kwargs):
kwargs = self.validate_read(kwargs)
index = self.read_index('index')
sp_values = self.read_array('sp_values')
sp_index = self.read_index('sp_index')
return SparseSeries(sp_values, index=index, sparse_index=sp_index,
kind=self.kind or u'block',
fill_value=self.fill_value,
name=self.name)
def write(self, obj, **kwargs):
super(SparseSeriesFixed, self).write(obj, **kwargs)
self.write_index('index', obj.index)
self.write_index('sp_index', obj.sp_index)
self.write_array('sp_values', obj.sp_values)
self.attrs.name = obj.name
self.attrs.fill_value = obj.fill_value
self.attrs.kind = obj.kind
class SparseFrameFixed(SparseFixed):
pandas_kind = u'sparse_frame'
attributes = ['default_kind', 'default_fill_value']
def read(self, **kwargs):
kwargs = self.validate_read(kwargs)
columns = self.read_index('columns')
sdict = {}
for c in columns:
key = 'sparse_series_{columns}'.format(columns=c)
s = SparseSeriesFixed(self.parent, getattr(self.group, key))
s.infer_axes()
sdict[c] = s.read()
return SparseDataFrame(sdict, columns=columns,
default_kind=self.default_kind,
default_fill_value=self.default_fill_value)
def write(self, obj, **kwargs):
""" write it as a collection of individual sparse series """
super(SparseFrameFixed, self).write(obj, **kwargs)
for name, ss in compat.iteritems(obj):
key = 'sparse_series_{name}'.format(name=name)
if key not in self.group._v_children:
node = self._handle.create_group(self.group, key)
else:
node = getattr(self.group, key)
s = SparseSeriesFixed(self.parent, node)
s.write(ss)
self.attrs.default_fill_value = obj.default_fill_value
self.attrs.default_kind = obj.default_kind
self.write_index('columns', obj.columns)
class BlockManagerFixed(GenericFixed):
attributes = ['ndim', 'nblocks']
is_shape_reversed = False
@property
def shape(self):
try:
ndim = self.ndim
# items
items = 0
for i in range(self.nblocks):
node = getattr(self.group, 'block{idx}_items'.format(idx=i))
shape = getattr(node, 'shape', None)
if shape is not None:
items += shape[0]
# data shape
node = getattr(self.group, 'block0_values')
shape = getattr(node, 'shape', None)
if shape is not None:
shape = list(shape[0:(ndim - 1)])
else:
shape = []
shape.append(items)
# hacky - this works for frames, but is reversed for panels
if self.is_shape_reversed:
shape = shape[::-1]
return shape
except AttributeError:
return None
def read(self, start=None, stop=None, **kwargs):
# start, stop applied to rows, so 0th axis only
kwargs = self.validate_read(kwargs)
select_axis = self.obj_type()._get_block_manager_axis(0)
axes = []
for i in range(self.ndim):
_start, _stop = (start, stop) if i == select_axis else (None, None)
ax = self.read_index('axis{idx}'.format(
idx=i), start=_start, stop=_stop)
axes.append(ax)
items = axes[0]
blocks = []
for i in range(self.nblocks):
blk_items = self.read_index('block{idx}_items'.format(idx=i))
values = self.read_array('block{idx}_values'.format(idx=i),
start=_start, stop=_stop)
blk = make_block(values,
placement=items.get_indexer(blk_items))
blocks.append(blk)
return self.obj_type(BlockManager(blocks, axes))
def write(self, obj, **kwargs):
super(BlockManagerFixed, self).write(obj, **kwargs)
data = obj._data
if not data.is_consolidated():
data = data.consolidate()
self.attrs.ndim = data.ndim
for i, ax in enumerate(data.axes):
if i == 0:
if not ax.is_unique:
raise ValueError(
"Columns index has to be unique for fixed format")
self.write_index('axis{idx}'.format(idx=i), ax)
# Supporting mixed-type DataFrame objects...nontrivial
self.attrs.nblocks = len(data.blocks)
for i, blk in enumerate(data.blocks):
# I have no idea why, but writing values before items fixed #2299
blk_items = data.items.take(blk.mgr_locs)
self.write_array('block{idx}_values'.format(idx=i),
blk.values, items=blk_items)
self.write_index('block{idx}_items'.format(idx=i), blk_items)
class FrameFixed(BlockManagerFixed):
pandas_kind = u'frame'
obj_type = DataFrame
class PanelFixed(BlockManagerFixed):
pandas_kind = u'wide'
obj_type = Panel
is_shape_reversed = True
def write(self, obj, **kwargs):
obj._consolidate_inplace()
return super(PanelFixed, self).write(obj, **kwargs)
class Table(Fixed):
""" represent a table:
facilitate read/write of various types of tables
Attrs in Table Node
-------------------
    These are attributes that are stored in the main table node; they are
necessary to recreate these tables when read back in.
index_axes : a list of tuples of the (original indexing axis and
index column)
non_index_axes: a list of tuples of the (original index axis and
columns on a non-indexing axis)
values_axes : a list of the columns which comprise the data of this
table
data_columns : a list of the columns that we are allowing indexing
(these become single columns in values_axes), or True to force all
columns
nan_rep : the string to use for nan representations for string
objects
levels : the names of levels
metadata : the names of the metadata columns
"""
pandas_kind = u'wide_table'
table_type = None
levels = 1
is_table = True
is_shape_reversed = False
def __init__(self, *args, **kwargs):
super(Table, self).__init__(*args, **kwargs)
self.index_axes = []
self.non_index_axes = []
self.values_axes = []
self.data_columns = []
self.metadata = []
self.info = dict()
self.nan_rep = None
self.selection = None
@property
def table_type_short(self):
return self.table_type.split('_')[0]
@property
def format_type(self):
return 'table'
def __unicode__(self):
""" return a pretty representatgion of myself """
self.infer_axes()
dc = ",dc->[{columns}]".format(columns=(','.join(
self.data_columns) if len(self.data_columns) else ''))
ver = ''
if self.is_old_version:
ver = "[{version}]".format(
version='.'.join(str(x) for x in self.version))
return (
"{pandas_type:12.12}{ver} (typ->{table_type},nrows->{nrows},"
"ncols->{ncols},indexers->[{index_axes}]{dc})".format(
pandas_type=self.pandas_type, ver=ver,
table_type=self.table_type_short, nrows=self.nrows,
ncols=self.ncols,
index_axes=(','.join(a.name for a in self.index_axes)), dc=dc
))
def __getitem__(self, c):
""" return the axis for c """
for a in self.axes:
if c == a.name:
return a
return None
def validate(self, other):
""" validate against an existing table """
if other is None:
return
if other.table_type != self.table_type:
raise TypeError(
"incompatible table_type with existing "
"[{other} - {self}]".format(
other=other.table_type, self=self.table_type))
for c in ['index_axes', 'non_index_axes', 'values_axes']:
sv = getattr(self, c, None)
ov = getattr(other, c, None)
if sv != ov:
# show the error for the specific axes
for i, sax in enumerate(sv):
oax = ov[i]
if sax != oax:
raise ValueError(
"invalid combinate of [{c}] on appending data "
"[{sax}] vs current table [{oax}]".format(
c=c, sax=sax, oax=oax))
# should never get here
raise Exception(
"invalid combinate of [{c}] on appending data [{sv}] vs "
"current table [{ov}]".format(c=c, sv=sv, ov=ov))
@property
def is_multi_index(self):
"""the levels attribute is 1 or a list in the case of a multi-index"""
return isinstance(self.levels, list)
def validate_metadata(self, existing):
""" create / validate metadata """
self.metadata = [
c.name for c in self.values_axes if c.metadata is not None]
def validate_multiindex(self, obj):
"""validate that we can store the multi-index; reset and return the
new object
"""
levels = [l if l is not None else "level_{0}".format(i)
for i, l in enumerate(obj.index.names)]
try:
return obj.reset_index(), levels
except ValueError:
raise ValueError("duplicate names/columns in the multi-index when "
"storing as a table")
@property
def nrows_expected(self):
""" based on our axes, compute the expected nrows """
return np.prod([i.cvalues.shape[0] for i in self.index_axes])
@property
def is_exists(self):
""" has this table been created """
return u'table' in self.group
@property
def storable(self):
return getattr(self.group, 'table', None)
@property
def table(self):
""" return the table group (this is my storable) """
return self.storable
@property
def dtype(self):
return self.table.dtype
@property
def description(self):
return self.table.description
@property
def axes(self):
return itertools.chain(self.index_axes, self.values_axes)
@property
def ncols(self):
""" the number of total columns in the values axes """
return sum(len(a.values) for a in self.values_axes)
@property
def is_transposed(self):
return False
@property
def data_orientation(self):
"""return a tuple of my permutated axes, non_indexable at the front"""
return tuple(itertools.chain([int(a[0]) for a in self.non_index_axes],
[int(a.axis) for a in self.index_axes]))
def queryables(self):
""" return a dict of the kinds allowable columns for this object """
# compute the values_axes queryables
return dict(
[(a.cname, a) for a in self.index_axes] +
[(self.storage_obj_type._AXIS_NAMES[axis], None)
for axis, values in self.non_index_axes] +
[(v.cname, v) for v in self.values_axes
if v.name in set(self.data_columns)]
)
def index_cols(self):
""" return a list of my index cols """
return [(i.axis, i.cname) for i in self.index_axes]
def values_cols(self):
""" return a list of my values cols """
return [i.cname for i in self.values_axes]
def _get_metadata_path(self, key):
""" return the metadata pathname for this key """
return "{group}/meta/{key}/meta".format(group=self.group._v_pathname,
key=key)
def write_metadata(self, key, values):
"""
write out a meta data array to the key as a fixed-format Series
Parameters
----------
key : string
values : ndarray
"""
values = Series(values)
self.parent.put(self._get_metadata_path(key), values, format='table',
encoding=self.encoding, errors=self.errors,
nan_rep=self.nan_rep)
def read_metadata(self, key):
""" return the meta data array for this key """
if getattr(getattr(self.group, 'meta', None), key, None) is not None:
return self.parent.select(self._get_metadata_path(key))
return None
def set_info(self):
""" update our table index info """
self.attrs.info = self.info
def set_attrs(self):
""" set our table type & indexables """
self.attrs.table_type = str(self.table_type)
self.attrs.index_cols = self.index_cols()
self.attrs.values_cols = self.values_cols()
self.attrs.non_index_axes = self.non_index_axes
self.attrs.data_columns = self.data_columns
self.attrs.nan_rep = self.nan_rep
self.attrs.encoding = self.encoding
self.attrs.errors = self.errors
self.attrs.levels = self.levels
self.attrs.metadata = self.metadata
self.set_info()
def get_attrs(self):
""" retrieve our attributes """
self.non_index_axes = getattr(
self.attrs, 'non_index_axes', None) or []
self.data_columns = getattr(
self.attrs, 'data_columns', None) or []
self.info = getattr(
self.attrs, 'info', None) or dict()
self.nan_rep = getattr(self.attrs, 'nan_rep', None)
self.encoding = _ensure_encoding(
getattr(self.attrs, 'encoding', None))
self.errors = getattr(self.attrs, 'errors', 'strict')
self.levels = getattr(
self.attrs, 'levels', None) or []
self.index_axes = [
a.infer(self) for a in self.indexables if a.is_an_indexable
]
self.values_axes = [
a.infer(self) for a in self.indexables if not a.is_an_indexable
]
self.metadata = getattr(
self.attrs, 'metadata', None) or []
def validate_version(self, where=None):
""" are we trying to operate on an old version? """
if where is not None:
if (self.version[0] <= 0 and self.version[1] <= 10 and
self.version[2] < 1):
ws = incompatibility_doc % '.'.join(
[str(x) for x in self.version])
warnings.warn(ws, IncompatibilityWarning)
def validate_min_itemsize(self, min_itemsize):
"""validate the min_itemisze doesn't contain items that are not in the
axes this needs data_columns to be defined
"""
if min_itemsize is None:
return
if not isinstance(min_itemsize, dict):
return
q = self.queryables()
for k, v in min_itemsize.items():
# ok, apply generally
if k == 'values':
continue
if k not in q:
raise ValueError(
"min_itemsize has the key [{key}] which is not an axis or "
"data_column".format(key=k))
@property
def indexables(self):
""" create/cache the indexables if they don't exist """
if self._indexables is None:
self._indexables = []
# index columns
self._indexables.extend([
IndexCol(name=name, axis=axis, pos=i)
for i, (axis, name) in enumerate(self.attrs.index_cols)
])
# values columns
dc = set(self.data_columns)
base_pos = len(self._indexables)
def f(i, c):
klass = DataCol
if c in dc:
klass = DataIndexableCol
return klass.create_for_block(i=i, name=c, pos=base_pos + i,
version=self.version)
self._indexables.extend(
[f(i, c) for i, c in enumerate(self.attrs.values_cols)])
return self._indexables
def create_index(self, columns=None, optlevel=None, kind=None):
"""
Create a pytables index on the specified columns
note: cannot index Time64Col() or ComplexCol currently;
PyTables must be >= 3.0
Parameters
----------
columns : False (don't create an index), True (create all columns
index), None or list_like (the indexers to index)
optlevel: optimization level (defaults to 6)
kind : kind of index (defaults to 'medium')
Exceptions
----------
raises if the node is not a table
"""
if not self.infer_axes():
return
if columns is False:
return
# index all indexables and data_columns
if columns is None or columns is True:
columns = [a.cname for a in self.axes if a.is_data_indexable]
if not isinstance(columns, (tuple, list)):
columns = [columns]
kw = dict()
if optlevel is not None:
kw['optlevel'] = optlevel
if kind is not None:
kw['kind'] = kind
table = self.table
for c in columns:
v = getattr(table.cols, c, None)
if v is not None:
# remove the index if the kind/optlevel have changed
if v.is_indexed:
index = v.index
cur_optlevel = index.optlevel
cur_kind = index.kind
if kind is not None and cur_kind != kind:
v.remove_index()
else:
kw['kind'] = cur_kind
if optlevel is not None and cur_optlevel != optlevel:
v.remove_index()
else:
kw['optlevel'] = cur_optlevel
# create the index
if not v.is_indexed:
if v.type.startswith('complex'):
raise TypeError(
'Columns containing complex values can be stored '
'but cannot'
' be indexed when using table format. Either use '
'fixed format, set index=False, or do not include '
'the columns containing complex values to '
'data_columns when initializing the table.')
v.create_index(**kw)
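# Illustrative sketch (assumes the hypothetical 'df' table node from the
# sketch above): users typically reach create_index through
# HDFStore.create_table_index, which forwards columns/optlevel/kind here.
#
#   >>> store.create_table_index('df', columns=['B'], optlevel=9,
#   ...                          kind='full')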
def read_axes(self, where, **kwargs):
"""create and return the axes sniffed from the table: return boolean
for success
"""
# validate the version
self.validate_version(where)
# infer the data kind
if not self.infer_axes():
return False
# create the selection
self.selection = Selection(self, where=where, **kwargs)
values = self.selection.select()
# convert the data
for a in self.axes:
a.set_info(self.info)
a.convert(values, nan_rep=self.nan_rep, encoding=self.encoding,
errors=self.errors)
return True
def get_object(self, obj):
""" return the data for this obj """
return obj
def validate_data_columns(self, data_columns, min_itemsize):
"""take the input data_columns and min_itemize and create a data
columns spec
"""
if not len(self.non_index_axes):
return []
axis, axis_labels = self.non_index_axes[0]
info = self.info.get(axis, dict())
if info.get('type') == 'MultiIndex' and data_columns:
raise ValueError("cannot use a multi-index on axis [{0}] with "
"data_columns {1}".format(axis, data_columns))
# evaluate the passed data_columns, True == use all columns
# take only valid axis labels
if data_columns is True:
data_columns = list(axis_labels)
elif data_columns is None:
data_columns = []
# if min_itemsize is a dict, add the keys (exclude 'values')
if isinstance(min_itemsize, dict):
existing_data_columns = set(data_columns)
data_columns.extend([
k for k in min_itemsize.keys()
if k != 'values' and k not in existing_data_columns
])
# return valid columns in the order of our axis
return [c for c in data_columns if c in axis_labels]
def create_axes(self, axes, obj, validate=True, nan_rep=None,
data_columns=None, min_itemsize=None, **kwargs):
""" create and return the axes
legacy tables create an indexable column, indexable index,
non-indexable fields
Parameters:
-----------
axes: a list of the axes in order to create (names or numbers of
the axes)
obj : the object to create axes on
validate: validate the obj against an existing object already
written
min_itemsize: a dict of the min size for a column in bytes
nan_rep : a value to use as the string column nan representation
encoding : the encoding for string values
data_columns : a list of columns that we want to create separate to
allow indexing (or True will force all columns)
"""
# set the default axes if needed
if axes is None:
try:
axes = _AXES_MAP[type(obj)]
except KeyError:
raise TypeError(
"cannot properly create the storer for: [group->{group},"
"value->{value}]".format(
group=self.group._v_name, value=type(obj)))
# map axes to numbers
axes = [obj._get_axis_number(a) for a in axes]
# do we have an existing table (if so, use its axes & data_columns)
if self.infer_axes():
existing_table = self.copy()
existing_table.infer_axes()
axes = [a.axis for a in existing_table.index_axes]
data_columns = existing_table.data_columns
nan_rep = existing_table.nan_rep
self.encoding = existing_table.encoding
self.errors = existing_table.errors
self.info = copy.copy(existing_table.info)
else:
existing_table = None
# currently only support ndim-1 axes
if len(axes) != self.ndim - 1:
raise ValueError(
"currently only support ndim-1 indexers in an AppendableTable")
# create according to the new data
self.non_index_axes = []
self.data_columns = []
# nan_representation
if nan_rep is None:
nan_rep = 'nan'
self.nan_rep = nan_rep
# create axes to index and non_index
index_axes_map = dict()
for i, a in enumerate(obj.axes):
if i in axes:
name = obj._AXIS_NAMES[i]
index_axes_map[i] = _convert_index(
a, self.encoding, self.errors, self.format_type
).set_name(name).set_axis(i)
else:
# we might be able to change the axes on the appending data if
# necessary
append_axis = list(a)
if existing_table is not None:
indexer = len(self.non_index_axes)
exist_axis = existing_table.non_index_axes[indexer][1]
if not array_equivalent(np.array(append_axis),
np.array(exist_axis)):
# ahah! -> reindex
if array_equivalent(np.array(sorted(append_axis)),
np.array(sorted(exist_axis))):
append_axis = exist_axis
# the non_index_axes info
info = _get_info(self.info, i)
info['names'] = list(a.names)
info['type'] = a.__class__.__name__
self.non_index_axes.append((i, append_axis))
# set axis positions (based on the axes)
self.index_axes = [
index_axes_map[a].set_pos(j).update_info(self.info)
for j, a in enumerate(axes)
]
j = len(self.index_axes)
# check for column conflicts
for a in self.axes:
a.maybe_set_size(min_itemsize=min_itemsize)
# reindex by our non_index_axes & compute data_columns
for a in self.non_index_axes:
obj = _reindex_axis(obj, a[0], a[1])
def get_blk_items(mgr, blocks):
return [mgr.items.take(blk.mgr_locs) for blk in blocks]
# figure out data_columns and get out blocks
block_obj = self.get_object(obj)._consolidate()
blocks = block_obj._data.blocks
blk_items = get_blk_items(block_obj._data, blocks)
if len(self.non_index_axes):
axis, axis_labels = self.non_index_axes[0]
data_columns = self.validate_data_columns(
data_columns, min_itemsize)
if len(data_columns):
mgr = block_obj.reindex(
Index(axis_labels).difference(Index(data_columns)),
axis=axis
)._data
blocks = list(mgr.blocks)
blk_items = get_blk_items(mgr, blocks)
for c in data_columns:
mgr = block_obj.reindex([c], axis=axis)._data
blocks.extend(mgr.blocks)
blk_items.extend(get_blk_items(mgr, mgr.blocks))
# reorder the blocks in the same order as the existing_table if we can
if existing_table is not None:
by_items = {tuple(b_items.tolist()): (b, b_items)
for b, b_items in zip(blocks, blk_items)}
new_blocks = []
new_blk_items = []
for ea in existing_table.values_axes:
items = tuple(ea.values)
try:
b, b_items = by_items.pop(items)
new_blocks.append(b)
new_blk_items.append(b_items)
except (IndexError, KeyError):
raise ValueError(
"cannot match existing table structure for [{items}] "
"on appending data".format(
items=(','.join(pprint_thing(item) for
item in items))))
blocks = new_blocks
blk_items = new_blk_items
# add my values
self.values_axes = []
for i, (b, b_items) in enumerate(zip(blocks, blk_items)):
# shape of the data column are the indexable axes
klass = DataCol
name = None
# we have a data_column
if (data_columns and len(b_items) == 1 and
b_items[0] in data_columns):
klass = DataIndexableCol
name = b_items[0]
self.data_columns.append(name)
# make sure that we match up the existing columns
# if we have an existing table
if existing_table is not None and validate:
try:
existing_col = existing_table.values_axes[i]
except (IndexError, KeyError):
raise ValueError(
"Incompatible appended table [{blocks}]"
"with existing table [{table}]".format(
blocks=blocks,
table=existing_table.values_axes))
else:
existing_col = None
try:
col = klass.create_for_block(
i=i, name=name, version=self.version)
col.set_atom(block=b, block_items=b_items,
existing_col=existing_col,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
encoding=self.encoding,
errors=self.errors,
info=self.info)
col.set_pos(j)
self.values_axes.append(col)
except (NotImplementedError, ValueError, TypeError) as e:
raise e
except Exception as detail:
raise Exception(
"cannot find the correct atom type -> "
"[dtype->{name},items->{items}] {detail!s}".format(
name=b.dtype.name, items=b_items, detail=detail))
j += 1
# validate our min_itemsize
self.validate_min_itemsize(min_itemsize)
# validate our metadata
self.validate_metadata(existing_table)
# validate the axes if we have an existing table
if validate:
self.validate(existing_table)
def process_axes(self, obj, columns=None):
""" process axes filters """
# make a copy to avoid side effects
if columns is not None:
columns = list(columns)
# make sure to include levels if we have them
if columns is not None and self.is_multi_index:
for n in self.levels:
if n not in columns:
columns.insert(0, n)
# reorder by any non_index_axes & limit to the select columns
for axis, labels in self.non_index_axes:
obj = _reindex_axis(obj, axis, labels, columns)
# apply the selection filters (but keep in the same order)
if self.selection.filter is not None:
for field, op, filt in self.selection.filter.format():
def process_filter(field, filt):
for axis_name in obj._AXIS_NAMES.values():
axis_number = obj._get_axis_number(axis_name)
axis_values = obj._get_axis(axis_name)
# see if the field is the name of an axis
if field == axis_name:
# if we have a multi-index, then need to include
# the levels
if self.is_multi_index:
filt = filt.union(Index(self.levels))
takers = op(axis_values, filt)
return obj.loc._getitem_axis(takers,
axis=axis_number)
# this might be the name of a field IN an axis
elif field in axis_values:
# we need to filter on this dimension
values = ensure_index(getattr(obj, field).values)
filt = ensure_index(filt)
# hack until we support reversed dim flags
if isinstance(obj, DataFrame):
axis_number = 1 - axis_number
takers = op(values, filt)
return obj.loc._getitem_axis(takers,
axis=axis_number)
raise ValueError("cannot find the field [{field}] for "
"filtering!".format(field=field))
obj = process_filter(field, filt)
return obj
def create_description(self, complib=None, complevel=None,
fletcher32=False, expectedrows=None):
""" create the description of the table from the axes & values """
# use the provided expectedrows if it is passed
if expectedrows is None:
expectedrows = max(self.nrows_expected, 10000)
d = dict(name='table', expectedrows=expectedrows)
# description from the axes & values
d['description'] = {a.cname: a.typ for a in self.axes}
if complib:
if complevel is None:
complevel = self._complevel or 9
filters = _tables().Filters(
complevel=complevel, complib=complib,
fletcher32=fletcher32 or self._fletcher32)
d['filters'] = filters
elif self._filters is not None:
d['filters'] = self._filters
return d
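# Illustrative sketch (hypothetical file name): the complib/complevel values
# consumed above come either from the store itself or from the append call.
#
#   >>> store = HDFStore('demo.h5', complevel=9, complib='blosc')
#   >>> store.append('df', df)   # chunks written through the Filters above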
def read_coordinates(self, where=None, start=None, stop=None, **kwargs):
"""select coordinates (row numbers) from a table; return the
coordinates object
"""
# validate the version
self.validate_version(where)
# infer the data kind
if not self.infer_axes():
return False
# create the selection
self.selection = Selection(
self, where=where, start=start, stop=stop, **kwargs)
coords = self.selection.select_coords()
if self.selection.filter is not None:
for field, op, filt in self.selection.filter.format():
data = self.read_column(
field, start=coords.min(), stop=coords.max() + 1)
coords = coords[
op(data.iloc[coords - coords.min()], filt).values]
return Index(coords)
def read_column(self, column, where=None, start=None, stop=None):
"""return a single column from the table, generally only indexables
are interesting
"""
# validate the version
self.validate_version()
# infer the data kind
if not self.infer_axes():
return False
if where is not None:
raise TypeError("read_column does not currently accept a where "
"clause")
# find the axes
for a in self.axes:
if column == a.name:
if not a.is_data_indexable:
raise ValueError(
"column [{column}] can not be extracted individually; "
"it is not data indexable".format(column=column))
# column must be an indexable or a data column
c = getattr(self.table.cols, column)
a.set_info(self.info)
return Series(_set_tz(a.convert(c[start:stop],
nan_rep=self.nan_rep,
encoding=self.encoding,
errors=self.errors
).take_data(),
a.tz, True), name=column)
raise KeyError(
"column [{column}] not found in the table".format(column=column))
class WORMTable(Table):
""" a write-once read-many table: this format DOES NOT ALLOW appending to a
table. Writing is a one-time operation; the data are stored in a format
that allows for searching the data on disk
"""
table_type = u'worm'
def read(self, **kwargs):
""" read the indices and the indexing array, calculate offset rows and
return """
raise NotImplementedError("WORMTable needs to implement read")
def write(self, **kwargs):
""" write in a format that we can search later on (but cannot append
to): write out the indices and the values using _write_array
(e.g. a CArray) create an indexing table so that we can search
"""
raise NotImplementedError("WORKTable needs to implement write")
class LegacyTable(Table):
""" an appendable table: allow append/query/delete operations to a
(possibly) already existing appendable table. This table ALLOWS
append (but doesn't require it), and stores the data in a format
that can be easily searched
"""
_indexables = [
IndexCol(name='index', axis=1, pos=0),
IndexCol(name='column', axis=2, pos=1, index_kind='columns_kind'),
DataCol(name='fields', cname='values', kind_attr='fields', pos=2)
]
table_type = u'legacy'
ndim = 3
def write(self, **kwargs):
raise TypeError("write operations are not allowed on legacy tables!")
def read(self, where=None, columns=None, **kwargs):
"""we have n indexable columns, with an arbitrary number of data
axes
"""
if not self.read_axes(where=where, **kwargs):
return None
lst_vals = [a.values for a in self.index_axes]
labels, levels = _factorize_from_iterables(lst_vals)
# labels and levels are tuples but lists are expected
labels = list(labels)
levels = list(levels)
N = [len(lvl) for lvl in levels]
# compute the key
key = _factor_indexer(N[1:], labels)
objs = []
if len(unique(key)) == len(key):
sorter, _ = algos.groupsort_indexer(
ensure_int64(key), np.prod(N))
sorter = ensure_platform_int(sorter)
# create the objs
for c in self.values_axes:
# the data need to be sorted
sorted_values = c.take_data().take(sorter, axis=0)
if sorted_values.ndim == 1:
sorted_values = sorted_values.reshape(
(sorted_values.shape[0], 1))
take_labels = [l.take(sorter) for l in labels]
items = Index(c.values)
block = _block2d_to_blocknd(
values=sorted_values, placement=np.arange(len(items)),
shape=tuple(N), labels=take_labels, ref_items=items)
# create the object
mgr = BlockManager([block], [items] + levels)
obj = self.obj_type(mgr)
# permute if needed
if self.is_transposed:
obj = obj.transpose(
*tuple(Series(self.data_orientation).argsort()))
objs.append(obj)
else:
warnings.warn(duplicate_doc, DuplicateWarning, stacklevel=5)
# reconstruct
long_index = MultiIndex.from_arrays(
[i.values for i in self.index_axes])
for c in self.values_axes:
lp = DataFrame(c.data, index=long_index, columns=c.values)
# need a better algorithm
tuple_index = long_index.values
unique_tuples = unique(tuple_index)
unique_tuples = com.asarray_tuplesafe(unique_tuples)
indexer = match(unique_tuples, tuple_index)
indexer = ensure_platform_int(indexer)
new_index = long_index.take(indexer)
new_values = lp.values.take(indexer, axis=0)
lp = DataFrame(new_values, index=new_index, columns=lp.columns)
objs.append(lp.to_panel())
# create the composite object
if len(objs) == 1:
wp = objs[0]
else:
wp = concat(objs, axis=0, verify_integrity=False)._consolidate()
# apply the selection filters & axis orderings
wp = self.process_axes(wp, columns=columns)
return wp
class LegacyFrameTable(LegacyTable):
""" support the legacy frame table """
pandas_kind = u'frame_table'
table_type = u'legacy_frame'
obj_type = Panel
def read(self, *args, **kwargs):
return super(LegacyFrameTable, self).read(*args, **kwargs)['value']
class LegacyPanelTable(LegacyTable):
""" support the legacy panel table """
table_type = u'legacy_panel'
obj_type = Panel
class AppendableTable(LegacyTable):
""" suppor the new appendable table formats """
_indexables = None
table_type = u'appendable'
def write(self, obj, axes=None, append=False, complib=None,
complevel=None, fletcher32=None, min_itemsize=None,
chunksize=None, expectedrows=None, dropna=False, **kwargs):
if not append and self.is_exists:
self._handle.remove_node(self.group, 'table')
# create the axes
self.create_axes(axes=axes, obj=obj, validate=append,
min_itemsize=min_itemsize,
**kwargs)
for a in self.axes:
a.validate(self, append)
if not self.is_exists:
# create the table
options = self.create_description(complib=complib,
complevel=complevel,
fletcher32=fletcher32,
expectedrows=expectedrows)
# set the table attributes
self.set_attrs()
# create the table
self._handle.create_table(self.group, **options)
else:
pass
# table = self.table
# update my info
self.set_info()
# validate the axes and set the kinds
for a in self.axes:
a.validate_and_set(self, append)
# add the rows
self.write_data(chunksize, dropna=dropna)
def write_data(self, chunksize, dropna=False):
""" we form the data into a 2-d including indexes,values,mask
write chunk-by-chunk """
names = self.dtype.names
nrows = self.nrows_expected
# if dropna==True, then drop ALL nan rows
masks = []
if dropna:
for a in self.values_axes:
# figure the mask: only do if we can successfully process this
# column, otherwise ignore the mask
mask = isna(a.data).all(axis=0)
if isinstance(mask, np.ndarray):
masks.append(mask.astype('u1', copy=False))
# consolidate masks
if len(masks):
mask = masks[0]
for m in masks[1:]:
mask = mask & m
mask = mask.ravel()
else:
mask = None
# broadcast the indexes if needed
indexes = [a.cvalues for a in self.index_axes]
nindexes = len(indexes)
bindexes = []
for i, idx in enumerate(indexes):
# broadcast to all other indexes except myself
if i > 0 and i < nindexes:
repeater = np.prod(
[indexes[bi].shape[0] for bi in range(0, i)])
idx = np.tile(idx, repeater)
if i < nindexes - 1:
repeater = np.prod([indexes[bi].shape[0]
for bi in range(i + 1, nindexes)])
idx = np.repeat(idx, repeater)
bindexes.append(idx)
# transpose the values so first dimension is last
# reshape the values if needed
values = [a.take_data() for a in self.values_axes]
values = [v.transpose(np.roll(np.arange(v.ndim), v.ndim - 1))
for v in values]
bvalues = []
for i, v in enumerate(values):
new_shape = (nrows,) + self.dtype[names[nindexes + i]].shape
bvalues.append(values[i].reshape(new_shape))
# write the chunks
if chunksize is None:
chunksize = 100000
rows = np.empty(min(chunksize, nrows), dtype=self.dtype)
chunks = int(nrows / chunksize) + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
self.write_data_chunk(
rows,
indexes=[a[start_i:end_i] for a in bindexes],
mask=mask[start_i:end_i] if mask is not None else None,
values=[v[start_i:end_i] for v in bvalues])
def write_data_chunk(self, rows, indexes, mask, values):
"""
Parameters
----------
rows : an empty memory space where we are putting the chunk
indexes : an array of the indexes
mask : an array of the masks
values : an array of the values
"""
# 0 len
for v in values:
if not np.prod(v.shape):
return
try:
nrows = indexes[0].shape[0]
if nrows != len(rows):
rows = np.empty(nrows, dtype=self.dtype)
names = self.dtype.names
nindexes = len(indexes)
# indexes
for i, idx in enumerate(indexes):
rows[names[i]] = idx
# values
for i, v in enumerate(values):
rows[names[i + nindexes]] = v
# mask
if mask is not None:
m = ~mask.ravel().astype(bool, copy=False)
if not m.all():
rows = rows[m]
except Exception as detail:
raise Exception(
"cannot create row-data -> {detail}".format(detail=detail))
try:
if len(rows):
self.table.append(rows)
self.table.flush()
except Exception as detail:
raise TypeError(
"tables cannot write this data -> {detail}".format(
detail=detail))
def delete(self, where=None, start=None, stop=None, **kwargs):
# delete all rows (and return the nrows)
if where is None or not len(where):
if start is None and stop is None:
nrows = self.nrows
self._handle.remove_node(self.group, recursive=True)
else:
# pytables<3.0 would remove a single row with stop=None
if stop is None:
stop = self.nrows
nrows = self.table.remove_rows(start=start, stop=stop)
self.table.flush()
return nrows
# infer the data kind
if not self.infer_axes():
return None
# create the selection
table = self.table
self.selection = Selection(
self, where, start=start, stop=stop, **kwargs)
values = self.selection.select_coords()
# delete the rows in reverse order
sorted_series = Series(values).sort_values()
ln = len(sorted_series)
if ln:
# construct groups of consecutive rows
diff = sorted_series.diff()
groups = list(diff[diff > 1].index)
# 1 group
if not len(groups):
groups = [0]
# final element
if groups[-1] != ln:
groups.append(ln)
# initial element
if groups[0] != 0:
groups.insert(0, 0)
# we must remove in reverse order!
pg = groups.pop()
for g in reversed(groups):
rows = sorted_series.take(lrange(g, pg))
table.remove_rows(start=rows[rows.index[0]
], stop=rows[rows.index[-1]] + 1)
pg = g
self.table.flush()
# return the number of rows removed
return ln
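# Worked example of the grouping logic above (hypothetical row coordinates):
# sorted coordinates [2, 3, 4, 10, 11] give diff [NaN, 1, 1, 6, 1], so the
# break points become [0, 3, 5]; the rows are then removed as the slices
# [10, 12) and [2, 5), i.e. in reverse order so that the coordinates of the
# earlier rows remain valid while the later rows are deleted.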
class AppendableFrameTable(AppendableTable):
""" suppor the new appendable table formats """
pandas_kind = u'frame_table'
table_type = u'appendable_frame'
ndim = 2
obj_type = DataFrame
@property
def is_transposed(self):
return self.index_axes[0].axis == 1
def get_object(self, obj):
""" these are written transposed """
if self.is_transposed:
obj = obj.T
return obj
def read(self, where=None, columns=None, **kwargs):
if not self.read_axes(where=where, **kwargs):
return None
info = (self.info.get(self.non_index_axes[0][0], dict())
if len(self.non_index_axes) else dict())
index = self.index_axes[0].values
frames = []
for a in self.values_axes:
# we could have a multi-index constructor here
# ensure_index doesn't recognize our list-of-tuples here
if info.get('type') == 'MultiIndex':
cols = MultiIndex.from_tuples(a.values)
else:
cols = Index(a.values)
names = info.get('names')
if names is not None:
cols.set_names(names, inplace=True)
if self.is_transposed:
values = a.cvalues
index_ = cols
cols_ = Index(index, name=getattr(index, 'name', None))
else:
values = a.cvalues.T
index_ = Index(index, name=getattr(index, 'name', None))
cols_ = cols
# if we have a DataIndexableCol, its shape will only be 1 dim
if values.ndim == 1 and isinstance(values, np.ndarray):
values = values.reshape((1, values.shape[0]))
block = make_block(values, placement=np.arange(len(cols_)))
mgr = BlockManager([block], [cols_, index_])
frames.append(DataFrame(mgr))
if len(frames) == 1:
df = frames[0]
else:
df = concat(frames, axis=1)
# apply the selection filters & axis orderings
df = self.process_axes(df, columns=columns)
return df
class AppendableSeriesTable(AppendableFrameTable):
""" support the new appendable table formats """
pandas_kind = u'series_table'
table_type = u'appendable_series'
ndim = 2
obj_type = Series
storage_obj_type = DataFrame
@property
def is_transposed(self):
return False
def get_object(self, obj):
return obj
def write(self, obj, data_columns=None, **kwargs):
""" we are going to write this as a frame table """
if not isinstance(obj, DataFrame):
name = obj.name or 'values'
obj = DataFrame({name: obj}, index=obj.index)
obj.columns = [name]
return super(AppendableSeriesTable, self).write(
obj=obj, data_columns=obj.columns.tolist(), **kwargs)
def read(self, columns=None, **kwargs):
is_multi_index = self.is_multi_index
if columns is not None and is_multi_index:
for n in self.levels:
if n not in columns:
columns.insert(0, n)
s = super(AppendableSeriesTable, self).read(columns=columns, **kwargs)
if is_multi_index:
s.set_index(self.levels, inplace=True)
s = s.iloc[:, 0]
# remove the default name
if s.name == 'values':
s.name = None
return s
class AppendableMultiSeriesTable(AppendableSeriesTable):
""" support the new appendable table formats """
pandas_kind = u'series_table'
table_type = u'appendable_multiseries'
def write(self, obj, **kwargs):
""" we are going to write this as a frame table """
name = obj.name or 'values'
obj, self.levels = self.validate_multiindex(obj)
cols = list(self.levels)
cols.append(name)
obj.columns = cols
return super(AppendableMultiSeriesTable, self).write(obj=obj, **kwargs)
class GenericTable(AppendableFrameTable):
""" a table that read/writes the generic pytables table format """
pandas_kind = u'frame_table'
table_type = u'generic_table'
ndim = 2
obj_type = DataFrame
@property
def pandas_type(self):
return self.pandas_kind
@property
def storable(self):
return getattr(self.group, 'table', None) or self.group
def get_attrs(self):
""" retrieve our attributes """
self.non_index_axes = []
self.nan_rep = None
self.levels = []
self.index_axes = [a.infer(self)
for a in self.indexables if a.is_an_indexable]
self.values_axes = [a.infer(self)
for a in self.indexables if not a.is_an_indexable]
self.data_columns = [a.name for a in self.values_axes]
@property
def indexables(self):
""" create the indexables from the table description """
if self._indexables is None:
d = self.description
# the index column is just a simple index
self._indexables = [GenericIndexCol(name='index', axis=0)]
for i, n in enumerate(d._v_names):
dc = GenericDataIndexableCol(
name=n, pos=i, values=[n], version=self.version)
self._indexables.append(dc)
return self._indexables
def write(self, **kwargs):
raise NotImplementedError("cannot write on an generic table")
class AppendableMultiFrameTable(AppendableFrameTable):
""" a frame with a multi-index """
table_type = u'appendable_multiframe'
obj_type = DataFrame
ndim = 2
_re_levels = re.compile(r"^level_\d+$")
@property
def table_type_short(self):
return u'appendable_multi'
def write(self, obj, data_columns=None, **kwargs):
if data_columns is None:
data_columns = []
elif data_columns is True:
data_columns = obj.columns.tolist()
obj, self.levels = self.validate_multiindex(obj)
for n in self.levels:
if n not in data_columns:
data_columns.insert(0, n)
return super(AppendableMultiFrameTable, self).write(
obj=obj, data_columns=data_columns, **kwargs)
def read(self, **kwargs):
df = super(AppendableMultiFrameTable, self).read(**kwargs)
df = df.set_index(self.levels)
# remove names for 'level_%d'
df.index = df.index.set_names([
None if self._re_levels.search(l) else l for l in df.index.names
])
return df
class AppendablePanelTable(AppendableTable):
""" suppor the new appendable table formats """
table_type = u'appendable_panel'
ndim = 3
obj_type = Panel
def get_object(self, obj):
""" these are written transposed """
if self.is_transposed:
obj = obj.transpose(*self.data_orientation)
return obj
@property
def is_transposed(self):
return self.data_orientation != tuple(range(self.ndim))
def _reindex_axis(obj, axis, labels, other=None):
ax = obj._get_axis(axis)
labels = ensure_index(labels)
# try not to reindex even if other is provided
# if it equals our current index
if other is not None:
other = ensure_index(other)
if (other is None or labels.equals(other)) and labels.equals(ax):
return obj
labels = ensure_index(labels.unique())
if other is not None:
labels = ensure_index(other.unique()).intersection(labels, sort=False)
if not labels.equals(ax):
slicer = [slice(None, None)] * obj.ndim
slicer[axis] = labels
obj = obj.loc[tuple(slicer)]
return obj
def _get_info(info, name):
""" get/create the info for this name """
try:
idx = info[name]
except KeyError:
idx = info[name] = dict()
return idx
# tz to/from coercion
def _get_tz(tz):
""" for a tz-aware type, return an encoded zone """
zone = timezones.get_timezone(tz)
if zone is None:
zone = tz.utcoffset().total_seconds()
return zone
def _set_tz(values, tz, preserve_UTC=False, coerce=False):
"""
coerce the values to a DatetimeIndex if tz is set
preserve the input shape if possible
Parameters
----------
values : ndarray
tz : string/pickled tz object
preserve_UTC : boolean,
preserve the UTC of the result
coerce : if we do not have a passed timezone, coerce to M8[ns] ndarray
"""
if tz is not None:
name = getattr(values, 'name', None)
values = values.ravel()
tz = timezones.get_timezone(_ensure_decoded(tz))
values = DatetimeIndex(values, name=name)
if values.tz is None:
values = values.tz_localize('UTC').tz_convert(tz)
if preserve_UTC:
if tz == 'UTC':
values = list(values)
elif coerce:
values = np.asarray(values, dtype='M8[ns]')
return values
def _convert_index(index, encoding=None, errors='strict', format_type=None):
index_name = getattr(index, 'name', None)
if isinstance(index, DatetimeIndex):
converted = index.asi8
return IndexCol(converted, 'datetime64', _tables().Int64Col(),
freq=getattr(index, 'freq', None),
tz=getattr(index, 'tz', None),
index_name=index_name)
elif isinstance(index, TimedeltaIndex):
converted = index.asi8
return IndexCol(converted, 'timedelta64', _tables().Int64Col(),
freq=getattr(index, 'freq', None),
index_name=index_name)
elif isinstance(index, (Int64Index, PeriodIndex)):
atom = _tables().Int64Col()
# avoid storing an ndarray of Period objects
return IndexCol(index._ndarray_values, 'integer', atom,
freq=getattr(index, 'freq', None),
index_name=index_name)
if isinstance(index, MultiIndex):
raise TypeError('MultiIndex not supported here!')
inferred_type = lib.infer_dtype(index, skipna=False)
values = np.asarray(index)
if inferred_type == 'datetime64':
converted = values.view('i8')
return IndexCol(converted, 'datetime64', _tables().Int64Col(),
freq=getattr(index, 'freq', None),
tz=getattr(index, 'tz', None),
index_name=index_name)
elif inferred_type == 'timedelta64':
converted = values.view('i8')
return IndexCol(converted, 'timedelta64', _tables().Int64Col(),
freq=getattr(index, 'freq', None),
index_name=index_name)
elif inferred_type == 'datetime':
converted = np.asarray([(time.mktime(v.timetuple()) +
v.microsecond / 1E6) for v in values],
dtype=np.float64)
return IndexCol(converted, 'datetime', _tables().Time64Col(),
index_name=index_name)
elif inferred_type == 'date':
converted = np.asarray([v.toordinal() for v in values],
dtype=np.int32)
return IndexCol(converted, 'date', _tables().Time32Col(),
index_name=index_name)
elif inferred_type == 'string':
# atom = _tables().ObjectAtom()
# return np.asarray(values, dtype='O'), 'object', atom
converted = _convert_string_array(values, encoding, errors)
itemsize = converted.dtype.itemsize
return IndexCol(
converted, 'string', _tables().StringCol(itemsize),
itemsize=itemsize, index_name=index_name
)
elif inferred_type == 'unicode':
if format_type == 'fixed':
atom = _tables().ObjectAtom()
return IndexCol(np.asarray(values, dtype='O'), 'object', atom,
index_name=index_name)
raise TypeError(
"[unicode] is not supported as a in index type for [{0}] formats"
.format(format_type)
)
elif inferred_type == 'integer':
# take a guess for now, hope the values fit
atom = _tables().Int64Col()
return IndexCol(np.asarray(values, dtype=np.int64), 'integer', atom,
index_name=index_name)
elif inferred_type == 'floating':
atom = _tables().Float64Col()
return IndexCol(np.asarray(values, dtype=np.float64), 'float', atom,
index_name=index_name)
else: # pragma: no cover
atom = _tables().ObjectAtom()
return IndexCol(np.asarray(values, dtype='O'), 'object', atom,
index_name=index_name)
def _unconvert_index(data, kind, encoding=None, errors='strict'):
kind = _ensure_decoded(kind)
if kind == u'datetime64':
index = DatetimeIndex(data)
elif kind == u'timedelta64':
index = TimedeltaIndex(data)
elif kind == u'datetime':
index = np.asarray([datetime.fromtimestamp(v) for v in data],
dtype=object)
elif kind == u'date':
try:
index = np.asarray(
[date.fromordinal(v) for v in data], dtype=object)
except (ValueError):
index = np.asarray(
[date.fromtimestamp(v) for v in data], dtype=object)
elif kind in (u'integer', u'float'):
index = np.asarray(data)
elif kind in (u'string'):
index = _unconvert_string_array(data, nan_rep=None, encoding=encoding,
errors=errors)
elif kind == u'object':
index = np.asarray(data[0])
else: # pragma: no cover
raise ValueError('unrecognized index type {kind}'.format(kind=kind))
return index
def _unconvert_index_legacy(data, kind, legacy=False, encoding=None,
errors='strict'):
kind = _ensure_decoded(kind)
if kind == u'datetime':
index = to_datetime(data)
elif kind in (u'integer'):
index = np.asarray(data, dtype=object)
elif kind in (u'string'):
index = _unconvert_string_array(data, nan_rep=None, encoding=encoding,
errors=errors)
else: # pragma: no cover
raise ValueError('unrecognized index type {kind}'.format(kind=kind))
return index
def _convert_string_array(data, encoding, errors, itemsize=None):
"""
we take a string-like that is object dtype and coerce to a fixed size
string type
Parameters
----------
data : a numpy array of object dtype
encoding : None or string-encoding
errors : handler for encoding errors
itemsize : integer, optional, defaults to the max length of the strings
Returns
-------
data in a fixed-length string dtype, encoded to bytes if needed
"""
# encode if needed
if encoding is not None and len(data):
data = Series(data.ravel()).str.encode(
encoding, errors).values.reshape(data.shape)
# create the sized dtype
if itemsize is None:
ensured = ensure_object(data.ravel())
itemsize = max(1, libwriters.max_len_string_array(ensured))
data = np.asarray(data, dtype="S{size}".format(size=itemsize))
return data
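# Illustrative example (hypothetical values): without an explicit itemsize
# the fixed width follows the longest string in the array.
#
#   >>> _convert_string_array(np.array(['a', 'bb'], dtype=object),
#   ...                       encoding='utf-8', errors='strict').dtype
#   dtype('S2')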
def _unconvert_string_array(data, nan_rep=None, encoding=None,
errors='strict'):
"""
inverse of _convert_string_array
Parameters
----------
data : fixed length string dtyped array
nan_rep : the storage repr of NaN, optional
encoding : the encoding of the data, optional
errors : handler for encoding errors, default 'strict'
Returns
-------
an object array of the decoded data
"""
shape = data.shape
data = np.asarray(data.ravel(), dtype=object)
# guard against a None encoding in PY3 (because of a legacy
# where the passed encoding is actually None)
encoding = _ensure_encoding(encoding)
if encoding is not None and len(data):
itemsize = libwriters.max_len_string_array(ensure_object(data))
if compat.PY3:
dtype = "U{0}".format(itemsize)
else:
dtype = "S{0}".format(itemsize)
if isinstance(data[0], compat.binary_type):
data = Series(data).str.decode(encoding, errors=errors).values
else:
data = data.astype(dtype, copy=False).astype(object, copy=False)
if nan_rep is None:
nan_rep = 'nan'
data = libwriters.string_array_replace_from_nan_rep(data, nan_rep)
return data.reshape(shape)
def _maybe_convert(values, val_kind, encoding, errors):
if _need_convert(val_kind):
conv = _get_converter(val_kind, encoding, errors)
# conv = np.frompyfunc(conv, 1, 1)
values = conv(values)
return values
def _get_converter(kind, encoding, errors):
kind = _ensure_decoded(kind)
if kind == 'datetime64':
return lambda x: np.asarray(x, dtype='M8[ns]')
elif kind == 'datetime':
return lambda x: to_datetime(x, cache=True).to_pydatetime()
elif kind == 'string':
return lambda x: _unconvert_string_array(x, encoding=encoding,
errors=errors)
else: # pragma: no cover
raise ValueError('invalid kind {kind}'.format(kind=kind))
def _need_convert(kind):
kind = _ensure_decoded(kind)
if kind in (u'datetime', u'datetime64', u'string'):
return True
return False
class Selection(object):
"""
Carries out a selection operation on a tables.Table object.
Parameters
----------
table : a Table object
where : list of Terms (or convertible to)
start, stop: indices to start and/or stop selection
"""
def __init__(self, table, where=None, start=None, stop=None):
self.table = table
self.where = where
self.start = start
self.stop = stop
self.condition = None
self.filter = None
self.terms = None
self.coordinates = None
if is_list_like(where):
# see if we have a passed coordinate like
try:
inferred = lib.infer_dtype(where, skipna=False)
if inferred == 'integer' or inferred == 'boolean':
where = np.asarray(where)
if where.dtype == np.bool_:
start, stop = self.start, self.stop
if start is None:
start = 0
if stop is None:
stop = self.table.nrows
self.coordinates = np.arange(start, stop)[where]
elif issubclass(where.dtype.type, np.integer):
if ((self.start is not None and
(where < self.start).any()) or
(self.stop is not None and
(where >= self.stop).any())):
raise ValueError(
"where must have index locations >= start and "
"< stop"
)
self.coordinates = where
except ValueError:
pass
if self.coordinates is None:
self.terms = self.generate(where)
# create the numexpr & the filter
if self.terms is not None:
self.condition, self.filter = self.terms.evaluate()
def generate(self, where):
""" where can be a : dict,list,tuple,string """
if where is None:
return None
q = self.table.queryables()
try:
return Expr(where, queryables=q, encoding=self.table.encoding)
except NameError:
# raise a nice message, suggesting that the user should use
# data_columns
raise ValueError(
"The passed where expression: {0}\n"
" contains an invalid variable reference\n"
" all of the variable references must be a "
"reference to\n"
" an axis (e.g. 'index' or 'columns'), or a "
"data_column\n"
" The currently defined references are: {1}\n"
.format(where, ','.join(q.keys()))
)
def select(self):
"""
generate the selection
"""
if self.condition is not None:
return self.table.table.read_where(self.condition.format(),
start=self.start,
stop=self.stop)
elif self.coordinates is not None:
return self.table.table.read_coordinates(self.coordinates)
return self.table.table.read(start=self.start, stop=self.stop)
def select_coords(self):
"""
generate the selection
"""
start, stop = self.start, self.stop
nrows = self.table.nrows
if start is None:
start = 0
elif start < 0:
start += nrows
if self.stop is None:
stop = nrows
elif stop < 0:
stop += nrows
if self.condition is not None:
return self.table.table.get_where_list(self.condition.format(),
start=start, stop=stop,
sort=True)
elif self.coordinates is not None:
return self.coordinates
return np.arange(start, stop)
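# Illustrative sketch (hypothetical key/column names): the Selection
# machinery above is what evaluates the `where` argument of HDFStore.select.
#
#   >>> store.select('df', where='B == "x"')   # parsed into an Expr/condition
#   >>> store.select('df', where=[0, 2, 5])    # integer list -> coordinates
#   >>> store.select('df', start=0, stop=100)  # plain row slice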
# utilities ###
def timeit(key, df, fn=None, remove=True, **kwargs):
if fn is None:
fn = 'timeit.h5'
store = HDFStore(fn, mode='w')
store.append(key, df, **kwargs)
store.close()
if remove:
os.remove(fn)
| bsd-3-clause |
sinhrks/scikit-learn | benchmarks/bench_glmnet.py | 297 | 3848 | """
To run this, you'll need to have the following installed:
* glmnet-python
* scikit-learn (of course)
It runs two benchmarks.
First, we fix a training set and increase the number of
samples. Then we plot the computation time as a function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as a function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of pylab
import pylab as pl
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
n_informative = n_features / 10
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
pl.clf()
xx = range(0, n * step, step)
pl.title('Lasso regression on sample dataset (%d features)' % n_features)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of samples to classify')
pl.ylabel('Time (s)')
pl.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
n_informative = n_features / 10
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
pl.figure('scikit-learn vs. glmnet benchmark results')
pl.title('Regression in high dimensional spaces (%d samples)' % n_samples)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
mhdella/scikit-learn | examples/model_selection/plot_roc_crossval.py | 247 | 3253 | """
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality using cross-validation.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean area under curve, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how
different the splits generated by K-fold cross-validation are from one another.
.. note::
See also :func:`sklearn.metrics.auc_score`,
:func:`sklearn.cross_validation.cross_val_score`,
:ref:`example_model_selection_plot_roc.py`,
"""
print(__doc__)
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import StratifiedKFold
###############################################################################
# Data IO and generation
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# Add noisy features
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
###############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(y, n_folds=6)
classifier = svm.SVC(kernel='linear', probability=True,
random_state=random_state)
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
for i, (train, test) in enumerate(cv):
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
# Compute ROC curve and area the curve
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')
mean_tpr /= len(cv)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, 'k--',
label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
xiyuw123/Tax-Calculator | taxcalc/utils.py | 1 | 8185 | import numpy as np
import pandas as pd
from pandas import DataFrame
STATS_COLUMNS = ['c00100', 'c04100', 'c04470', 'c04800', 'c05200',
'c09600', 'c07100', 'c09200', '_refund', '_ospctax',
'c10300', 'e00100', 's006']
TABLE_COLUMNS = ['c00100', 'c04100', 'c04470', 'c04800', 'c05200',
'c09600', 'c07100', 'c09200', '_refund', '_ospctax',
'c10300']
def extract_array(f):
"""
A sanity check decorator. When combined with numba.vectorize
or guvectorize, it provides the same capability as dataframe_vectorize
or dataframe_guvectorize
"""
def wrapper(*args, **kwargs):
arrays = [arg.values for arg in args]
return f(*arrays)
return wrapper
def expand_1D(x, inflate, inflation_rates, num_years):
"""
Expand the given data to account for the given number of budget years.
If necessary, pad out additional years by increasing the last given
year at the provided inflation rate.
"""
assert len(inflation_rates) == num_years
if isinstance(x, np.ndarray):
if len(x) >= num_years:
return x
else:
ans = np.zeros(num_years, dtype='f8')
ans[:len(x)] = x
if inflate:
extra = []
cur = x[-1]
for i in range(1, num_years - len(x) + 1):
inf_idx = i + len(x) - 1
cur *= (1. + inflation_rates[inf_idx])
extra.append(cur)
else:
extra = [float(x[-1]) for i in
range(1, num_years - len(x) + 1)]
ans[len(x):] = extra
return ans.astype(x.dtype, casting='unsafe')
return expand_1D(np.array([x]), inflate, inflation_rates, num_years)
def expand_2D(x, inflate, inflation_rates, num_years):
"""
Expand the given data to account for the given number of budget years.
For 2D arrays, we expand out the number of rows until we have num_years
number of rows. For each expanded row, we inflate by the given inflation
rate.
"""
if isinstance(x, np.ndarray):
if x.shape[0] >= num_years:
return x
else:
ans = np.zeros((num_years, x.shape[1]))
ans[:len(x), :] = x
if inflate:
extra = []
cur = x[-1]
for i in range(1, num_years - len(x) + 1):
inf_idx = i + len(x) - 1
cur = np.array(cur*(1. + inflation_rates[inf_idx]))
extra.append(cur)
else:
extra = [x[-1, :] for i in
range(1, num_years - len(x) + 1)]
ans[len(x):, :] = extra
return ans.astype(x.dtype, casting='unsafe')
return expand_2D(np.array([x]), inflate, inflation_rates, num_years)
def expand_array(x, inflate, inflation_rates, num_years):
"""
Dispatch to either expand_1D or expand_2D depending on the dimension of x
Parameters
----------
x : value to expand
inflate: Boolean
As we expand, inflate values if this is True, otherwise, just copy
inflation_rates: list of floats
Yearly inflation rates, one per budget year
num_years: int
Number of budget years to expand
Returns
-------
expanded numpy array
"""
try:
if len(x.shape) == 1:
return expand_1D(x, inflate, inflation_rates, num_years)
elif len(x.shape) == 2:
return expand_2D(x, inflate, inflation_rates, num_years)
else:
raise ValueError("Need a 1D or 2D array")
except AttributeError as ae:
raise ValueError("Must pass a numpy array")
def count_gt_zero(agg):
return sum([1 for a in agg if a > 0])
def count_lt_zero(agg):
return sum([1 for a in agg if a < 0])
def weighted_count_lt_zero(agg, col_name):
return agg[agg[col_name] < 0]['s006'].sum()
def weighted_count_gt_zero(agg, col_name):
return agg[agg[col_name] > 0]['s006'].sum()
def weighted_count(agg):
return agg['s006'].sum()
def weighted_mean(agg, col_name):
return float((agg[col_name]*agg['s006']).sum()) / float(agg['s006'].sum())
def weighted_sum(agg, col_name):
return (agg[col_name]*agg['s006']).sum()
def weighted_perc_inc(agg, col_name):
return float(weighted_count_gt_zero(agg, col_name)) / float(weighted_count(agg))
def weighted_perc_dec(agg, col_name):
return float(weighted_count_lt_zero(agg, col_name)) / float(weighted_count(agg))
def weighted_share_of_total(agg, col_name, total):
return float(weighted_sum(agg, col_name)) / float(total)
def groupby_weighted_decile(df):
"""
Group by each 10% of AGI, weighted by s006
"""
#First, sort by AGI
df.sort('c00100', inplace=True)
#Next, do a cumulative sum by the weights
df['cumsum_weights'] = np.cumsum(df['s006'].values)
#Max value of cum sum of weights
max_ = df['cumsum_weights'].values[-1]
#Create 10 bins and labels based on this cumulative weight
bins = [0] + list(np.arange(1, 11) * (max_ / 10.0))
# Ten decile labels (1-10), one per bin
labels = list(range(1, 11))
# Groupby weighted deciles
df['wdecs'] = pd.cut(df['cumsum_weights'], bins, labels=labels)
return df.groupby('wdecs')
def groupby_income_bins(df, bins=None, right=True):
"""
Group by income bins of AGI
bins: iterable of scalars
AGI income breakpoints. Follows pandas convention. The
breakpoint is inclusive if right=True
right : bool, optional
Indicates whether the bins include the rightmost edge or not.
If right == True (the default), then the bins [1,2,3,4]
indicate (1,2], (2,3], (3,4].
"""
if not bins:
bins = [-1e14, 0, 9999, 19999, 29999, 39999, 49999, 74999, 99999,
200000, 1e14]
# Groupby c00100 bins
df['bins'] = pd.cut(df['c00100'], bins, right=right)
return df.groupby('bins')
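# Illustrative note (not part of the original module): with the default
# breakpoints above and right=True, rows are grouped into AGI intervals
# (-1e14, 0], (0, 9999], (9999, 19999], ..., (99999, 200000], (200000, 1e14].
#
#   gp = groupby_income_bins(results_df)   # results_df assumed to carry a 'c00100' column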
def means_and_comparisons(df, col_name, gp, weighted_total):
"""
Using grouped values, perform aggregate operations
to populate a DataFrame of weighted difference statistics
df: DataFrame for full results of calculation
col_name: the column name to calculate against
gp: grouped DataFrame
weighted_total: denominator used to compute each group's share of the total change
"""
# Who has a tax cut, and who has a tax increase
diffs = gp.apply(weighted_count_lt_zero, col_name)
diffs = DataFrame(data=diffs, columns=['tax_cut'])
diffs['tax_inc'] = gp.apply(weighted_count_gt_zero, col_name)
diffs['count'] = gp.apply(weighted_count)
diffs['mean'] = gp.apply(weighted_mean, col_name)
diffs['tot_change'] = gp.apply(weighted_sum, col_name)
diffs['perc_inc'] = gp.apply(weighted_perc_inc, col_name)
diffs['perc_cut'] = gp.apply(weighted_perc_dec, col_name)
diffs['share_of_change'] = gp.apply(weighted_share_of_total,
col_name, weighted_total)
return diffs
def results(c):
outputs = [getattr(c, col) for col in STATS_COLUMNS]
return DataFrame(data=np.column_stack(outputs), columns=STATS_COLUMNS)
def create_distribution_table(calc, groupby):
res = results(calc)
if groupby == "weighted_deciles":
gp = groupby_weighted_decile(res)
elif groupby == "agi_bins":
gp = groupby_income_bins(res)
else:
err = "groupby must be either 'weighted_deciles' or 'agi_bins'"
raise ValueError(err)
return gp[TABLE_COLUMNS].mean()
def create_difference_table(calc1, calc2, groupby):
res1 = results(calc1)
res2 = results(calc2)
if groupby == "weighted_deciles":
gp = groupby_weighted_decile(res2)
elif groupby == "agi_bins":
gp = groupby_income_bins(res2)
else:
err = "groupby must be either 'weighted_deciles' or 'agi_bins'"
raise ValueError(err)
# Difference in plans
# Positive values are the magnitude of the tax increase
# Negative values are the magnitude of the tax decrease
res2['tax_diff'] = res2['_ospctax'] - res1['_ospctax']
diffs = means_and_comparisons(res2, 'tax_diff', gp,
(res2['tax_diff']*res2['s006']).sum())
return diffs
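# Illustrative usage sketch (hypothetical calculator objects that expose the
# STATS_COLUMNS attributes, e.g. a baseline run and a reform run):
#
#   dist = create_distribution_table(calc_base, groupby='weighted_deciles')
#   diff = create_difference_table(calc_base, calc_reform, groupby='agi_bins')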
| mit |
thirdwing/SFrame | oss_src/unity/python/sframe/test/test_graph.py | 9 | 16156 | '''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
# from nose import with_setup
from ..data_structures.sgraph import SGraph, Vertex, Edge, load_graph
from ..data_structures.sframe import SFrame
import pandas as pd
from pandas.util.testing import assert_frame_equal
import numpy as np
import unittest
import tempfile
import util
import json
import os
class GraphTests(unittest.TestCase):
def setUp(self):
self.vertices = pd.DataFrame({
'vid': ['1', '2', '3'],
'color': ['g', None, 'b'],
'vec': [[.1, .1, .1], [.1, .1, .1], [.1, .1, .1]]})
self.edges = pd.DataFrame({
'src_id': ['1', '2', '3'],
'dst_id': ['2', '3', '4'],
'weight': [0., None, 1.]})
def test_empty_graph(self):
g = SGraph()
self.assertEqual(g.summary(), {'num_vertices': 0, 'num_edges': 0})
self.assertEqual(len(g.get_fields()), 3)
self.assertTrue(g.get_vertices(format='sframe').shape, (0, 1))
self.assertTrue(g.get_edges(format='sframe').shape, (0, 2))
self.assertTrue(g.vertices.shape, (0, 1))
self.assertTrue(g.edges.shape, (0, 2))
self.assertTrue(len(g.get_vertices(format='list')) == 0)
self.assertTrue(len(g.get_edges(format='list')) == 0)
def test_graph_constructor(self):
g = SGraph().add_vertices(self.vertices, 'vid').add_edges(self.edges, 'src_id', 'dst_id')
g2 = SGraph(g.vertices, g.edges)
g3 = SGraph(g.vertices, g.edges, src_field="__dst_id", dst_field="__src_id") #flip around src and dst
assert_frame_equal(g.vertices.to_dataframe().sort('__id').reset_index(drop=True),
g2.vertices.to_dataframe().sort('__id').reset_index(drop=True))
assert_frame_equal(g.edges.to_dataframe().sort(['__src_id', '__dst_id']).reset_index(drop=True),
g2.edges.to_dataframe().sort(['__src_id', '__dst_id']).reset_index(drop=True))
self.assertRaises(ValueError, lambda: SGraph(SFrame(self.vertices), SFrame(self.edges)))
self.assertRaises(ValueError, lambda: SGraph(SFrame(self.vertices), SFrame(self.edges), 'vid', '__src_id', '__dst_id'))
self.assertRaises(ValueError, lambda: SGraph(SFrame(self.vertices), SFrame(self.edges), vid_field=None, src_field='src_id', dst_field='dst_id'))
def test_simple_graph(self):
for input_type in [pd.DataFrame, SFrame, list]:
g = SGraph()
if input_type is list:
vertices = [Vertex(x[1]['vid'], {'color': x[1]['color'], 'vec': x[1]['vec']}) for x in self.vertices.iterrows()]
edges = [Edge(x[1]['src_id'], x[1]['dst_id'], {'weight': x[1]['weight']}) for x in self.edges.iterrows()]
g = g.add_vertices(vertices)
g = g.add_edges(edges)
else:
g = g.add_vertices(input_type(self.vertices), vid_field='vid')
g = g.add_edges(input_type(self.edges), src_field='src_id', dst_field='dst_id')
self.assertEqual(g.summary(), {'num_vertices': 4, 'num_edges': 3})
self.assertItemsEqual(g.get_fields(), ['__id', '__src_id', '__dst_id', 'color', 'vec', 'weight'])
self.assertItemsEqual(g.get_vertices(format='dataframe').columns.values, ['color', 'vec'])
self.assertItemsEqual(g.get_edges(format='dataframe').columns.values, ['__src_id', '__dst_id', 'weight'])
self.assertTrue(g.get_edges(format='dataframe').shape, (3, 3))
self.assertTrue(g.get_vertices(format='dataframe').shape, (4, 3))
self.assertTrue(g.get_vertices(format='dataframe', fields={'color': 'g'}).shape, (1, 2))
self.assertTrue(g.get_edges(format='dataframe', fields={'weight': 0.}).shape, (1, 3))
self.assertItemsEqual(g.get_vertices(format='sframe').column_names(), ['__id', 'color', 'vec'])
self.assertItemsEqual(g.get_edges(format='sframe').column_names(), ['__src_id', '__dst_id', 'weight'])
self.assertTrue(g.get_edges(format='sframe').shape, (3, 3))
self.assertTrue(g.get_vertices(format='sframe').shape, (4, 3))
self.assertTrue(g.get_vertices(format='sframe', fields={'color': 'g'}).shape, (1, 2))
self.assertTrue(g.get_edges(format='sframe', fields={'weight': 0.}).shape, (1, 3))
vertices = g.get_vertices(format='list')
edges = g.get_edges(format='list')
self.assertEqual(len(vertices), 4)
self.assertEqual(len(edges), 3)
# get edges is lazy
edges = g.get_edges()
self.assertFalse(edges.__is_materialized__())
def test_vertex_query(self):
df = pd.DataFrame({'src': ['a', 'c', 'b', 'd', 'c', 'e', 'g', 'f'],
'dst': ['b', 'b', 'd', 'c', 'e', 'g', 'f', 'e']})
g = SGraph().add_edges(df, src_field='src', dst_field='dst')
# basic check
g2 = g.get_neighborhood(ids=['b'], radius=1, full_subgraph=False)
out = g2.get_edges(format='dataframe')
out.sort(columns=['__src_id', '__dst_id'], axis=0, inplace=True)
out.index = range(len(out))
correct = pd.DataFrame.from_records([('b', 'd'),
('a', 'b'),
('c', 'b')],
columns=['__src_id', '__dst_id'])
correct.sort(columns=['__src_id', '__dst_id'], axis=0, inplace=True)
correct.index = range(len(correct))
assert_frame_equal(out, correct, check_dtype=False)
# check larger radius, full subgraph, and multiple vertices
g2 = g.get_neighborhood(ids=['a', 'g'], radius=2, full_subgraph=True)
out = g2.get_edges(format='dataframe')
out.sort(columns=['__src_id', '__dst_id'], axis=0, inplace=True)
out.index = range(len(out))
correct = pd.DataFrame.from_records([('a', 'b'),
('b', 'd'),
('c', 'b'),
('c', 'e'),
('d', 'c'),
('e', 'g'),
('f', 'e'),
('g', 'f')],
columns=['__src_id', '__dst_id'])
correct.sort(columns=['__src_id', '__dst_id'], axis=0, inplace=True)
correct.index = range(len(correct))
assert_frame_equal(out, correct, check_dtype=False)
def test_select_query(self):
g = SGraph()
g = g.add_vertices(self.vertices, 'vid').add_edges(self.edges, 'src_id', 'dst_id')
g2 = g.select_fields(["color", "weight"])
self.assertSequenceEqual((g2.get_fields()), ['__id', 'color', '__src_id', '__dst_id', 'weight'])
g2 = g.select_fields(["color"])
self.assertSequenceEqual((g2.get_fields()), ['__id', 'color', '__src_id', '__dst_id'])
del g.edges['weight']
del g.vertices['vec']
g.vertices['color2'] = g.vertices['color']
self.assertSequenceEqual((g.get_fields()), ['__id', 'color', 'color2', '__src_id', '__dst_id'])
g2 = g.select_fields([])
self.assertSequenceEqual((g2.get_fields()), ['__id', '__src_id', '__dst_id'])
def test_select_query_with_same_vertex_edge_field(self):
vertices = SFrame({'__id': range(10)})
edges = SFrame({'__src_id': range(10), '__dst_id': range(1, 11)})
g = SGraph(vertices, edges)
g.vertices['weight'] = 0
g.vertices['v'] = 0
g.edges['weight'] = 0
g.edges['e'] = 0
self.assertItemsEqual(g.get_fields(), ['v', 'e', 'weight', 'weight', '__id', '__src_id', '__dst_id'])
g2 = g.select_fields('weight')
self.assertItemsEqual(g2.get_fields(), ['weight', 'weight', '__id', '__src_id', '__dst_id'])
def test_save_load(self):
g = SGraph().add_vertices(self.vertices, 'vid').add_edges(self.edges, 'src_id', 'dst_id')
with util.TempDirectory() as f:
g.save(f)
g2 = load_graph(f, 'binary')
self.assertEqual(g2.summary(), {'num_vertices': 4, 'num_edges': 3})
self.assertItemsEqual(g2.get_fields(), {'__id', '__src_id', '__dst_id', 'color', 'vec', 'weight'})
with util.TempDirectory() as f:
g.save(f, format='csv')
vertices = SFrame.read_csv(f + "/vertices.csv")
edges = SFrame.read_csv(f + "/edges.csv")
g2 = SGraph().add_edges(edges, '__src_id', '__dst_id').add_vertices(vertices, '__id')
self.assertEqual(g2.summary(), {'num_vertices': 4, 'num_edges': 3})
self.assertItemsEqual(g2.get_fields(), {'__id', '__src_id', '__dst_id', 'color', 'vec', 'weight'})
temp_fn = None
# The delete=False is for Windows sake
with tempfile.NamedTemporaryFile(suffix='.json', delete=False) as f:
temp_fn = f.name
g.save(f.name)
with open(f.name, 'r') as f2:
data = f2.read()
g2 = json.loads(data)
self.assertTrue("vertices" in g2)
self.assertTrue("edges" in g2)
if os.path.exists(temp_fn):
os.remove(temp_fn)
def test_load_graph_from_text(self):
toy_graph_snap = """#some comment string
#some more comment string
1\t2
1\t3
2\t3
2\t1
3\t1
3\t2"""
toy_graph_tsv = """1\t2
1\t3
2\t3
2\t1
3\t1
3\t2"""
toy_graph_csv = """1,2
1,3
2,3
2,1
3,1
3,2"""
temp_fnames = []
with tempfile.NamedTemporaryFile(delete=False) as fsnap, tempfile.NamedTemporaryFile(delete=False) as ftsv, tempfile.NamedTemporaryFile(delete=False) as fcsv:
fsnap.write(toy_graph_snap)
fsnap.file.flush()
ftsv.write(toy_graph_tsv)
ftsv.file.flush()
fcsv.write(toy_graph_csv)
fcsv.file.flush()
for (fname, fmt) in zip([fsnap.name, ftsv.name, fcsv.name], ['snap', 'tsv', 'csv']):
g = load_graph('remote://' + fname, fmt)
self.assertEqual(g.summary(), {'num_vertices': 3, 'num_edges': 6})
temp_fnames.append(fname)
for name in temp_fnames:
if os.path.exists(name):
os.remove(name)
def test_robust_parse(self):
df = pd.DataFrame({'int': [1, 2, 3],
'float': [1., 2., 3.],
'str': ['one', 'two', 'three'],
'nan': [np.nan, np.nan, np.nan],
'sparse_int': [1, 2, np.nan],
'sparse_float': [np.nan, 2., 3.],
'sparse_str': [None, 'two', None]
})
g = SGraph().add_vertices(df)
self.assertItemsEqual(g.get_fields(), df.columns.tolist() + ['__id', '__src_id', '__dst_id'])
df2 = g.get_vertices(format='dataframe')
sf = g.get_vertices(format='sframe')
for col in df.columns:
# potential bug: df2 is missing the 'nan' column.
if (col != 'nan'):
self.assertItemsEqual(sorted(list(df2[col].dropna())), sorted(list(df[col].dropna())))
self.assertItemsEqual(sorted(list(sf[col].dropna())), sorted(list(df[col].dropna())))
def test_missing_value_vids(self):
vertices = SFrame()
vertices['vid'] = [1, 2, 3, None]
edges = SFrame()
edges['src'] = [1, 2, 3, None]
edges['dst'] = [4, 4, 4, 4]
self.assertRaises(RuntimeError, lambda : SGraph().add_vertices(vertices, 'vid').summary())
self.assertRaises(RuntimeError, lambda : SGraph().add_edges(edges, 'src', 'dst').summary())
self.assertRaises(RuntimeError, lambda : SGraph().add_edges(edges, 'dst', 'src').summary())
def test_gframe(self):
g = SGraph()
v = g.vertices
self.assertSequenceEqual(v.column_names(), ['__id'])
e = g.edges
self.assertSequenceEqual(e.column_names(), ['__src_id', '__dst_id'])
# Test vertices and edge attributes cannot be modified
def set_vertices_empty(g):
g.vertices = SFrame()
def set_edges_empty(g):
g.edges = SFrame()
def remove_vertices(g):
del g.vertices
def remove_edges(g):
del g.edges
def remove_edge_column(gf, name):
del gf[name]
self.assertRaises(AttributeError, lambda: remove_vertices(g))
self.assertRaises(AttributeError, lambda: remove_edges(g))
self.assertRaises(AttributeError, lambda: set_vertices_empty(g))
self.assertRaises(AttributeError, lambda: set_edges_empty(g))
# Test gframe operations has the same effect as its sframe+graph equivalent
g = SGraph().add_vertices(self.vertices, 'vid').add_edges(self.edges, 'src_id', 'dst_id')
v = g.vertices
v['id_col'] = v['__id']
e = g.edges
e['src_id_col'] = e['__src_id']
e['dst_id_col'] = e['__dst_id']
g2 = SGraph().add_vertices(self.vertices, 'vid').add_edges(self.edges, 'src_id', 'dst_id')
new_vdata = g2.get_vertices()
new_vdata['id_col'] = new_vdata['__id']
new_edata = g2.get_edges()
new_edata['src_id_col'] = new_edata['__src_id']
new_edata['dst_id_col'] = new_edata['__dst_id']
g2 = SGraph().add_vertices(new_vdata, '__id').add_edges(new_edata, '__src_id', '__dst_id')
assert_frame_equal(g.get_vertices().to_dataframe().sort('__id').reset_index(drop=True),
g2.get_vertices().to_dataframe().sort('__id').reset_index(drop=True))
assert_frame_equal(g.get_edges().to_dataframe().sort(['__src_id', '__dst_id']).reset_index(drop=True),
g2.get_edges().to_dataframe().sort(['__src_id', '__dst_id']).reset_index(drop=True))
# check delete a column with exception, and edges is still in a valid state
self.assertRaises(KeyError, lambda: remove_edge_column(g.edges, 'badcolumn'))
g.edges.head()
# test slicing
assert_frame_equal(g.edges[:3].to_dataframe(), g.get_edges()[:3].to_dataframe())
assert_frame_equal(g.vertices[:3].to_dataframe(), g.get_vertices()[:3].to_dataframe())
# test add row number
e_expected = g.get_edges().to_dataframe()
v_expected = g.get_vertices().to_dataframe()
e_expected['id'] = range(len(e_expected))
v_expected['id'] = range(len(v_expected))
def test_sframe_le_append_skip_row_bug_is_fixed(self):
"""
This test is actually for SFrame lazy evaluation.
It lives here because the repro can only be triggered through SGraph.
The bug appears when the SFrame has a lazy_append: when passing through
the logical filter, skip_rows is not applied correctly, so the edge_sframe
is in a bad state until it is materialized.
This unit test stays here to ensure the bug stays fixed until we can find
a cleaner repro.
"""
n = 12 # smallest n to repro the le_append bug
# A graph with edge i -> i + 1
g = SGraph().add_edges(SFrame({'src': range(n), 'dst': range(1, n + 1)}), 'src', 'dst')
lazy_sf = g.get_edges()
materialized_sf = g.get_edges()
materialized_sf.__materialize__()
assert_frame_equal(lazy_sf[lazy_sf['__dst_id'] == n].to_dataframe(), materialized_sf[materialized_sf['__dst_id'] == n].to_dataframe())
| bsd-3-clause |
magnastrazh/NEUCOGAR | nest/serotonin/research/C/nest-2.10.0/topology/pynest/tests/test_dumping.py | 13 | 3483 | # -*- coding: utf-8 -*-
#
# test_dumping.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Tests for topology hl_api dumping functions.
NOTE: These tests only check whether the code runs; they do not verify
that the results produced are correct.
"""
import unittest
import nest
import nest.topology as topo
import sys
import os
import os.path
class PlottingTestCase(unittest.TestCase):
def nest_tmpdir(self):
"""Loads temporary directory path from the environment variable, returns current directory otherwise"""
if 'NEST_DATA_PATH' in os.environ:
return os.environ['NEST_DATA_PATH']
else:
return '.'
def test_DumpNodes(self):
"""Test dumping nodes."""
ldict = {'elements': 'iaf_neuron', 'rows': 3, 'columns':3,
'extent': [2., 2.], 'edge_wrap': True}
nest.ResetKernel()
l = topo.CreateLayer(ldict)
topo.DumpLayerNodes(l, os.path.join(self.nest_tmpdir(), 'test_DumpNodes.out.lyr') )
self.assertTrue(True)
def test_DumpNodes2(self):
"""Test dumping nodes, two layers."""
ldict = {'elements': 'iaf_neuron', 'rows': 3, 'columns':3,
'extent': [2., 2.], 'edge_wrap': True}
nest.ResetKernel()
l = topo.CreateLayer(ldict)
topo.DumpLayerNodes(l*2, os.path.join(self.nest_tmpdir(), 'test_DumpNodes2.out.lyr') )
self.assertTrue(True)
def test_DumpConns(self):
"""Test dumping connections."""
ldict = {'elements': 'iaf_neuron', 'rows': 3, 'columns':3,
'extent': [2., 2.], 'edge_wrap': True}
cdict = {'connection_type': 'divergent', 'mask': {'circular': {'radius': 1.}}}
nest.ResetKernel()
l = topo.CreateLayer(ldict)
topo.ConnectLayers(l, l, cdict)
topo.DumpLayerConnections(l, 'static_synapse', os.path.join(self.nest_tmpdir(), 'test_DumpConns.out.cnn') )
self.assertTrue(True)
def test_DumpConns2(self):
"""Test dumping connections, 2 layers."""
ldict = {'elements': 'iaf_neuron', 'rows': 3, 'columns':3,
'extent': [2., 2.], 'edge_wrap': True}
cdict = {'connection_type': 'divergent', 'mask': {'circular': {'radius': 1.}}}
nest.ResetKernel()
l = topo.CreateLayer(ldict)
topo.ConnectLayers(l, l, cdict)
topo.DumpLayerConnections(l*2, 'static_synapse', os.path.join(self.nest_tmpdir(), 'test_DumpConns2.out.cnn') )
self.assertTrue(True)
def suite():
suite = unittest.makeSuite(PlottingTestCase,'test')
return suite
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
try:
import matplotlib.pyplot as plt
plt.show()
except ImportError:
pass
| gpl-2.0 |
jerryjiahaha/rts2 | scripts/rts2saf/rts2saf/fitdisplay.py | 3 | 3586 | #!/usr/bin/python
# (C) 2013, Markus Wildi, [email protected]
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Or visit http://www.gnu.org/licenses/gpl.html.
#
__author__ = '[email protected]'
import sys
if 'matplotlib' not in sys.modules:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
class FitDisplay(object):
"""Display a fit with matplotlib
:var date: date when focus run started
:var comment: optional comment
:var logger: :py:mod:`rts2saf.log`
"""
def __init__(self, date=None, comment=None, logger=None):
self.date=date
self.logger=logger
self.comment=comment
self.fig=None
self.ax1=None
self.fig = plt.figure()
self.ax1 = self.fig.add_subplot(111)
def fitDisplay(self, dataFit=None, resultFit=None, show=True, display=False, xdisplay = None):
"""Display fit using matplotlib
:param dataFit: :py:mod:`rts2saf.data.DataFit`
:param resultFit: :py:mod:`rts2saf.data.ResultFit`
:param display: if True display and save plot to file, False save only
:return: :py:mod:`rts2saf.data.DataFit`.plotFn
"""
try:
x_pos = np.linspace(dataFit.pos.min(), dataFit.pos.max())
except Exception, e:
self.logger.error('fitDisplay: numpy error:\n{0}'.format(e))
return e
self.ax1.plot(dataFit.pos, dataFit.val, 'ro', color=resultFit.color)
self.ax1.errorbar(dataFit.pos, dataFit.val, xerr=dataFit.errx, yerr=dataFit.erry, ecolor='black', fmt='none')
if resultFit.fitFlag:
line, = self.ax1.plot(x_pos, dataFit.fitFunc(x_pos, p=resultFit.fitPar), 'r-', color=resultFit.color)
if self.comment:
self.ax1.set_title('rts2saf, {0},{1},{2}C,{3},{4}'.format(self.date, dataFit.ftName, dataFit.ambientTemp, resultFit.titleResult, self.comment), fontsize=12)
else:
self.ax1.set_title('rts2saf, {0},{1},{2}C,{3}'.format(self.date, dataFit.ftName, dataFit.ambientTemp, resultFit.titleResult), fontsize=12)
self.ax1.set_xlabel('FOC_POS [tick]')
self.ax1.set_ylabel(resultFit.ylabel)
self.ax1.grid(True)
if show and display and xdisplay:
# NO: self.fig.show()
plt.show()
elif display and not xdisplay:
self.logger.warn('fitDisplay: NO $DISPLAY no plot')
# no return here, save plot
try:
self.fig.savefig(dataFit.plotFn)
self.logger.info('fitDisplay: storing plot file: {0}'.format(dataFit.plotFn))
return dataFit.plotFn
except Exception, e:
self.logger.error('fitDisplay: can not save plot to: {0}, matplotlib error:\n{1}'.format(dataFit.plotFn,e))
return e
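# Illustrative usage sketch (hypothetical dataFit/resultFit objects produced
# elsewhere in rts2saf):
#
#   fd = FitDisplay(date='2013-09-08T09:30:09', comment='focus run', logger=lg)
#   plot_fn = fd.fitDisplay(dataFit=dataFit, resultFit=resultFit, show=False,
#                           display=False, xdisplay=None)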
| lgpl-3.0 |
mlyundin/scikit-learn | examples/text/document_classification_20newsgroups.py | 222 | 10500 | """
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify documents
by topics using a bag-of-words approach. This example uses a scipy.sparse
matrix to store the features and demonstrates various classifiers that can
efficiently handle sparse matrices.
The dataset used in this example is the 20 newsgroups dataset. It will be
automatically downloaded, then cached.
The bar plot indicates the accuracy, training time (normalized) and test time
(normalized) of each classifier.
"""
# Author: Peter Prettenhofer <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
###############################################################################
# Load some categories from the training set
if opts.all_categories:
categories = None
else:
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
if opts.filtered:
remove = ('headers', 'footers', 'quotes')
else:
remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42,
remove=remove)
print('data loaded')
categories = data_train.target_names # for case categories == None
def size_mb(docs):
return sum(len(s.encode('utf-8')) for s in docs) / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = vectorizer.get_feature_names()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
if feature_names:
# keep selected feature names
feature_names = [feature_names[i] for i
in ch2.get_support(indices=True)]
print("done in %fs" % (time() - t0))
print()
if feature_names:
feature_names = np.asarray(feature_names)
def trim(s):
"""Trim string to fit on terminal (assuming 80-column display)"""
return s if len(s) <= 80 else s[:77] + "..."
###############################################################################
# Benchmark classifiers
def benchmark(clf):
print('_' * 80)
print("Training: ")
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
if opts.print_top10 and feature_names is not None:
print("top 10 keywords per class:")
for i, category in enumerate(categories):
top10 = np.argsort(clf.coef_[i])[-10:]
print(trim("%s: %s"
% (category, " ".join(feature_names[top10]))))
print()
if opts.print_report:
print("classification report:")
print(metrics.classification_report(y_test, pred,
target_names=categories))
if opts.print_cm:
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
clf_descr = str(clf).split('(')[0]
return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
dual=False, tol=1e-3)))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)),
('classification', LinearSVC())
])))
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='r')
plt.barh(indices + .3, training_time, .2, label="training time", color='g')
plt.barh(indices + .6, test_time, .2, label="test time", color='b')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
| bsd-3-clause |
M-R-Houghton/euroscipy_2015 | bokeh/bokeh/sampledata/periodic_table.py | 45 | 1542 | '''
This module provides the periodic table as a data set. It exposes an attribute 'elements'
which is a pandas dataframe with the following fields
elements['atomic Number']
elements['symbol']
elements['name']
elements['atomic mass'] (units: amu)
elements['CPK'] (convention for molecular modeling color)
elements['electronic configuration']
elements['electronegativity'] (units: Pauling)
elements['atomic radius'] (units: pm)
elements['ionic radius'] (units: pm)
elements['van der waals radius'] (units: pm)
elements['ionization enerygy'] (units: kJ/mol)
elements['electron affinity'] (units: kJ/mol)
elements['phase'] (standard state: solid, liquid, gas)
elements['bonding type']
elements['melting point'] (units: K)
elements['boiling point'] (units: K)
elements['density'] (units: g/cm^3)
elements['type'] (see below)
elements['year discovered']
elements['group']
elements['period']
element types: actinoid, alkali metal, alkaline earth metal, halogen, lanthanoid, metal, metalloid, noble gas, nonmetal, transition metal
'''
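# Illustrative usage sketch (column names as documented above):
#
#   from bokeh.sampledata.periodic_table import elements
#   noble_gases = elements[elements['type'] == 'noble gas']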
from __future__ import absolute_import
from os.path import dirname, join
try:
import pandas as pd
except ImportError as e:
raise RuntimeError("elements data requires pandas (http://pandas.pydata.org) to be installed")
elements = pd.read_csv(join(dirname(__file__), 'elements.csv'))
| mit |
sujitpal/polydlot | src/tf2/01-mfact-model.py | 1 | 7210 | # https://www.tensorflow.org/alpha/guide/eager
import h5py
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from pathlib import Path
from scipy.sparse import csr_matrix
from sklearn.preprocessing import MaxAbsScaler
class MatrixFactorization(tf.keras.layers.Layer):
def __init__(self, emb_sz, **kwargs):
super(MatrixFactorization, self).__init__(**kwargs)
self.emb_sz = emb_sz
# self.dynamic = True
def build(self, input_shape):
num_users, num_movies = input_shape
self.U = self.add_variable("U",
shape=[num_users, self.emb_sz],
dtype=tf.float32,
initializer=tf.initializers.GlorotUniform)
self.M = self.add_variable("M",
shape=[num_movies, self.emb_sz],
dtype=tf.float32,
initializer=tf.initializers.GlorotUniform)
self.bu = self.add_variable("bu",
shape=[num_users],
dtype=tf.float32,
initializer=tf.initializers.Zeros)
self.bm = self.add_variable("bm",
shape=[num_movies],
dtype=tf.float32,
initializer=tf.initializers.Zeros)
self.bg = self.add_variable("bg",
shape=[],
dtype=tf.float32,
initializer=tf.initializers.Zeros)
def call(self, input):
return (tf.add(
tf.add(
tf.matmul(self.U, tf.transpose(self.M)),
tf.expand_dims(self.bu, axis=1)),
tf.expand_dims(self.bm, axis=0)) +
self.bg)
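# Shape sketch for MatrixFactorization.call (illustrative): with num_users=U,
# num_movies=M and emb_sz=k, self.U is (U, k) and self.M is (M, k), so
# tf.matmul(self.U, tf.transpose(self.M)) is (U, M); the user bias broadcasts
# as (U, 1), the movie bias as (1, M), and self.bg is a scalar, giving a
# (U, M) matrix of reconstructed (scaled) ratings.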
class MatrixFactorizer(tf.keras.Model):
def __init__(self, embedding_size):
super(MatrixFactorizer, self).__init__()
self.matrixFactorization = MatrixFactorization(embedding_size)
self.sigmoid = tf.keras.layers.Activation("sigmoid")
def call(self, input):
output = self.matrixFactorization(input)
output = self.sigmoid(output)
return output
def loss_fn(source, target):
mse = tf.keras.losses.MeanSquaredError()
loss = mse(source, target)
return loss
def build_numpy_lookup(id2idx, lookup_size):
lookup = np.zeros((lookup_size))
idx2id = {idx: id for (id, idx) in id2idx.items()}
for idx, id in idx2id.items():
lookup[idx] = id
return lookup
def load_data():
movie_id2title = {}
mid_list = []
with open(MOVIES_FILE, "r") as fmov:
for line in fmov:
if line.startswith("\"movieId"):
continue
cols = line.strip().split(",")
mid, mtitle = cols[0], cols[1]
mid = int(mid)
mtitle = mtitle.strip("\"")
movie_id2title[mid] = mtitle
mid_list.append(mid)
unique_uids = set()
uidmid2ratings = {}
with open(RATINGS_FILE, "r") as frat:
for line in frat:
if line.startswith("\"userId"):
continue
cols = line.strip().split(",")
uid, mid, rating = cols[0], cols[1], cols[2]
uid = int(uid)
mid = int(mid)
rating = float(rating)
unique_uids.add(uid)
uidmid2ratings[(uid, mid)] = rating
uid_list = sorted(list(unique_uids))
num_users = len(uid_list)
num_movies = len(mid_list)
uid2index = {x: i for i, x in enumerate(uid_list)}
mid2index = {x: i for i, x in enumerate(mid_list)}
rows, cols, data = [], [], []
for uid in uid_list:
for mid in mid_list:
try:
data.append(uidmid2ratings[(uid, mid)])
rows.append(uid2index[uid])
cols.append(mid2index[mid])
except KeyError:
continue
ratings = csr_matrix((np.array(data),
(np.array(rows), np.array(cols))),
shape=(num_users, num_movies), dtype=np.float32)
scaler = MaxAbsScaler()
ratings = scaler.fit_transform(ratings)
X = ratings.todense()
print("X.shape:", X.shape)
# matrix index to id mappings
user_idx2id = build_numpy_lookup(uid2index, num_users)
movie_idx2id = build_numpy_lookup(mid2index, num_movies)
return X, user_idx2id, movie_idx2id
####################################### main ##########################################
DATA_DIR = Path("../../data")
MOVIES_FILE = DATA_DIR / "movies.csv"
RATINGS_FILE = DATA_DIR / "ratings.csv"
WEIGHTS_FILE = DATA_DIR / "mf-weights.h5"
EMBEDDING_SIZE = 15
BATCH_SIZE = 1
NUM_EPOCHS = 5
X, user_idx2id, movie_idx2id = load_data()
model = MatrixFactorizer(EMBEDDING_SIZE)
model.build(input_shape=X.shape)
model.summary()
optimizer = tf.optimizers.RMSprop(learning_rate=1e-3, momentum=0.9)
losses, steps = [], []
for i in range(1000):
with tf.GradientTape() as tape:
Xhat = model(X)
loss = loss_fn(X, Xhat)
if i % 100 == 0:
loss_value = loss.numpy()
losses.append(loss_value)
steps.append(i)
print("step: {:d}, loss: {:.3f}".format(i, loss_value))
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(gradients, variables))
# plot training loss
plt.plot(steps, losses, marker="o")
plt.xlabel("steps")
plt.ylabel("loss")
plt.show()
# save weights from trained model
with h5py.File(WEIGHTS_FILE, "w") as hf:
for layer in model.layers:
if layer.name == "matrix_factorization":
for weight in layer.weights:
weight_name = weight.name.split("/")[1].split(":")[0]
weight_value = weight.numpy()
hf.create_dataset(weight_name, data=weight_value)
hf.create_dataset("user_idx2id", data=user_idx2id)
hf.create_dataset("movie_idx2id", data=movie_idx2id)
| apache-2.0 |
Lyleo/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/__init__.py | 72 | 2225 |
import matplotlib
import inspect
import warnings
# ipython relies on interactive_bk being defined here
from matplotlib.rcsetup import interactive_bk
__all__ = ['backend','show','draw_if_interactive',
'new_figure_manager', 'backend_version']
backend = matplotlib.get_backend() # validates, to match all_backends
def pylab_setup():
'return new_figure_manager, draw_if_interactive and show for pylab'
# Import the requested backend into a generic module object
if backend.startswith('module://'):
backend_name = backend[9:]
else:
backend_name = 'backend_'+backend
backend_name = backend_name.lower() # until we banish mixed case
backend_name = 'matplotlib.backends.%s'%backend_name.lower()
backend_mod = __import__(backend_name,
globals(),locals(),[backend_name])
# Things we pull in from all backends
new_figure_manager = backend_mod.new_figure_manager
# image backends like pdf, agg or svg do not need to do anything
# for "show" or "draw_if_interactive", so if they are not defined
# by the backend, just do nothing
def do_nothing_show(*args, **kwargs):
frame = inspect.currentframe()
fname = frame.f_back.f_code.co_filename
if fname in ('<stdin>', '<ipython console>'):
warnings.warn("""
Your currently selected backend, '%s' does not support show().
Please select a GUI backend in your matplotlibrc file ('%s')
or with matplotlib.use()""" %
(backend, matplotlib.matplotlib_fname()))
def do_nothing(*args, **kwargs): pass
backend_version = getattr(backend_mod,'backend_version', 'unknown')
show = getattr(backend_mod, 'show', do_nothing_show)
draw_if_interactive = getattr(backend_mod, 'draw_if_interactive', do_nothing)
# Additional imports which only happen for certain backends. This section
# should probably disappear once all backends are uniform.
if backend.lower() in ['wx','wxagg']:
Toolbar = backend_mod.Toolbar
__all__.append('Toolbar')
matplotlib.verbose.report('backend %s version %s' % (backend,backend_version))
return new_figure_manager, draw_if_interactive, show
| gpl-3.0 |
victorbergelin/scikit-learn | examples/cluster/plot_lena_segmentation.py | 271 | 2444 | """
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discrete' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <[email protected]>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a Voronoi tessellation.
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11
###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels,
random_state=1)
t1 = time.time()
labels = labels.reshape(lena.shape)
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l, contours=1,
colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
plt.xticks(())
plt.yticks(())
plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
plt.show()
| bsd-3-clause |
cbmoore/statsmodels | statsmodels/regression/tests/test_robustcov.py | 25 | 31412 | # -*- coding: utf-8 -*-
"""Testing OLS robust covariance matrices against STATA
Created on Mon Oct 28 15:25:14 2013
Author: Josef Perktold
"""
import numpy as np
from scipy import stats
from numpy.testing import (assert_allclose, assert_equal, assert_warns,
assert_raises)
from statsmodels.regression.linear_model import OLS, WLS
import statsmodels.stats.sandwich_covariance as sw
from statsmodels.tools.tools import add_constant
from statsmodels.datasets import macrodata
from statsmodels.tools.sm_exceptions import InvalidTestWarning
from .results import results_macro_ols_robust as res
from .results import results_grunfeld_ols_robust_cluster as res2
#test_hac_simple():
class CheckOLSRobust(object):
def test_basic(self):
res1 = self.res1
res2 = self.res2
rtol = getattr(self, 'rtol', 1e-10)
assert_allclose(res1.params, res2.params, rtol=rtol)
assert_allclose(self.bse_robust, res2.bse, rtol=rtol)
assert_allclose(self.cov_robust, res2.cov, rtol=rtol)
def test_tests(self):
# Note: differences between small (t-distribution, ddof) and large (normal)
# F statistic has no ddof correction in large, but uses F distribution (?)
res1 = self.res1
res2 = self.res2
rtol = getattr(self, 'rtol', 1e-10)
rtolh = getattr(self, 'rtolh', 1e-12)
mat = np.eye(len(res1.params))
tt = res1.t_test(mat, cov_p=self.cov_robust)
# has 'effect', 'pvalue', 'sd', 'tvalue'
# TODO confint missing
assert_allclose(tt.effect, res2.params, rtol=rtol)
assert_allclose(tt.sd, res2.bse, rtol=rtol)
assert_allclose(tt.tvalue, res2.tvalues, rtol=rtol)
if self.small:
assert_allclose(tt.pvalue, res2.pvalues, rtol=5 * rtol)
else:
pval = stats.norm.sf(np.abs(tt.tvalue)) * 2
assert_allclose(pval, res2.pvalues, rtol=5 * rtol, atol=1e-25)
ft = res1.f_test(mat[:-1], cov_p=self.cov_robust)
if self.small:
#'df_denom', 'df_num', 'fvalue', 'pvalue'
assert_allclose(ft.fvalue, res2.F, rtol=rtol)
# f-pvalue is not directly available in Stata results, but is in ivreg2
if hasattr(res2, 'Fp'):
assert_allclose(ft.pvalue, res2.Fp, rtol=rtol)
else:
if not getattr(self, 'skip_f', False):
dof_corr = res1.df_resid * 1. / res1.nobs
assert_allclose(ft.fvalue * dof_corr, res2.F, rtol=rtol)
if hasattr(res2, 'df_r'):
assert_equal(ft.df_num, res2.df_m)
assert_equal(ft.df_denom, res2.df_r)
else:
# ivreg2
assert_equal(ft.df_num, res2.Fdf1)
assert_equal(ft.df_denom, res2.Fdf2)
# SMOKE
tt.summary()
ft.summary()
tt.summary_frame()
class TestOLSRobust1(CheckOLSRobust):
# compare with regress robust
def setup(self):
res_ols = self.res1
self.bse_robust = res_ols.HC1_se
self.cov_robust = res_ols.cov_HC1
self.small = True
self.res2 = res.results_hc0
@classmethod
def setup_class(cls):
d2 = macrodata.load().data
g_gdp = 400*np.diff(np.log(d2['realgdp']))
g_inv = 400*np.diff(np.log(d2['realinv']))
exogg = add_constant(np.c_[g_gdp, d2['realint'][:-1]], prepend=False)
cls.res1 = res_ols = OLS(g_inv, exogg).fit()
class TestOLSRobust2(TestOLSRobust1):
# compare with ivreg robust small
def setup(self):
res_ols = self.res1
self.bse_robust = res_ols.HC1_se
self.cov_robust = res_ols.cov_HC1
self.small = True
self.res2 = res.results_ivhc0_small
class TestOLSRobust3(TestOLSRobust1):
# compare with ivreg robust (not small)
def setup(self):
res_ols = self.res1
self.bse_robust = res_ols.HC0_se
self.cov_robust = res_ols.cov_HC0
self.small = False
self.res2 = res.results_ivhc0_large
class TestOLSRobustHacSmall(TestOLSRobust1):
# compare with ivreg robust small
def setup(self):
res_ols = self.res1
cov1 = sw.cov_hac_simple(res_ols, nlags=4, use_correction=True)
se1 = sw.se_cov(cov1)
self.bse_robust = se1
self.cov_robust = cov1
self.small = True
self.res2 = res.results_ivhac4_small
class TestOLSRobustHacLarge(TestOLSRobust1):
# compare with ivreg robust (not small)
def setup(self):
res_ols = self.res1
cov1 = sw.cov_hac_simple(res_ols, nlags=4, use_correction=False)
se1 = sw.se_cov(cov1)
self.bse_robust = se1
self.cov_robust = cov1
self.small = False
self.res2 = res.results_ivhac4_large
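# Illustrative usage sketch (mirrors the robust-covariance options exercised
# by the tests in this module; y, X and firm_id are hypothetical):
#
#   res_hac = OLS(y, X).fit(cov_type='HAC', cov_kwds=dict(maxlags=4))
#   res_clu = OLS(y, X).fit(cov_type='cluster',
#                           cov_kwds=dict(groups=firm_id, use_correction=True))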
class CheckOLSRobustNewMixin(object):
# This uses the robust covariance as default covariance
def test_compare(self):
rtol = getattr(self, 'rtol', 1e-10)
assert_allclose(self.cov_robust, self.cov_robust2, rtol=rtol)
assert_allclose(self.bse_robust, self.bse_robust2, rtol=rtol)
def test_fvalue(self):
if not getattr(self, 'skip_f', False):
rtol = getattr(self, 'rtol', 1e-10)
assert_allclose(self.res1.fvalue, self.res2.F, rtol=rtol)
if hasattr(self.res2, 'Fp'):
#only available with ivreg2
assert_allclose(self.res1.f_pvalue, self.res2.Fp, rtol=rtol)
def test_confint(self):
rtol = getattr(self, 'rtol', 1e-10)
ci1 = self.res1.conf_int()
ci2 = self.res2.params_table[:,4:6]
assert_allclose(ci1, ci2, rtol=rtol)
# check critical value
crit1 = np.diff(ci1, 1).ravel() / 2 / self.res1.bse
crit2 = np.diff(ci1, 1).ravel() / 2 / self.res1.bse
assert_allclose(crit1, crit2, rtol=12)
def test_ttest(self):
res1 = self.res1
res2 = self.res2
rtol = getattr(self, 'rtol', 1e-10)
rtolh = getattr(self, 'rtol', 1e-12)
mat = np.eye(len(res1.params))
tt = res1.t_test(mat, cov_p=self.cov_robust)
# has 'effect', 'pvalue', 'sd', 'tvalue'
# TODO confint missing
assert_allclose(tt.effect, res2.params, rtol=rtolh)
assert_allclose(tt.sd, res2.bse, rtol=rtol)
assert_allclose(tt.tvalue, res2.tvalues, rtol=rtolh)
assert_allclose(tt.pvalue, res2.pvalues, rtol=5 * rtol)
ci1 = tt.conf_int()
ci2 = self.res2.params_table[:,4:6]
assert_allclose(ci1, ci2, rtol=rtol)
def test_scale(self):
res1 = self.res1
res2 = self.res2
rtol = 1e-5
# Note we always use df_resid for scale
# Stata uses nobs or df_resid for rmse, not always available in Stata
#assert_allclose(res1.scale, res2.rmse**2 * res2.N / (res2.N - res2.df_m - 1), rtol=rtol)
skip = False
if hasattr(res2, 'rss'):
scale = res2.rss / (res2.N - res2.df_m - 1)
elif hasattr(res2, 'rmse'):
scale = res2.rmse**2
else:
skip = True
if isinstance(res1.model, WLS):
skip = True
# Stata uses different scaling and using unweighted resid for rmse
if not skip:
assert_allclose(res1.scale, scale, rtol=rtol)
if not res2.vcetype == 'Newey-West':
# no rsquared in Stata
r2 = res2.r2 if hasattr(res2, 'r2') else res2.r2c
assert_allclose(res1.rsquared, r2, rtol=rtol, err_msg=str(skip))
# consistency checks, not against Stata
df_resid = res1.nobs - res1.df_model - 1
assert_equal(res1.df_resid, df_resid)
# variance of resid_pearson is 1, with ddof, and loc=0
psum = (res1.resid_pearson**2).sum()
assert_allclose(psum, df_resid, rtol=1e-13)
def test_smoke(self):
self.res1.summary()
class TestOLSRobust2SmallNew(TestOLSRobust1, CheckOLSRobustNewMixin):
# compare with ivreg robust small
def setup(self):
res_ols = self.res1.get_robustcov_results('HC1', use_t=True)
self.res3 = self.res1
self.res1 = res_ols
self.bse_robust = res_ols.bse
self.cov_robust = res_ols.cov_params()
self.bse_robust2 = res_ols.HC1_se
self.cov_robust2 = res_ols.cov_HC1
self.small = True
self.res2 = res.results_ivhc0_small
def test_compare(self):
#check that we get a warning using the nested compare methods
res1 = self.res1
endog = res1.model.endog
exog = res1.model.exog[:, [0, 2]] # drop one variable
res_ols2 = OLS(endog, exog).fit()
# results from Stata
r_pval = .0307306938402991
r_chi2 = 4.667944083588736
r_df = 1
assert_warns(InvalidTestWarning, res1.compare_lr_test, res_ols2)
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
chi2, pval, df = res1.compare_lr_test(res_ols2)
assert_allclose(chi2, r_chi2, rtol=1e-11)
assert_allclose(pval, r_pval, rtol=1e-11)
assert_equal(df, r_df)
assert_warns(InvalidTestWarning, res1.compare_f_test, res_ols2)
#fva, pval, df = res1.compare_f_test(res_ols2)
class TestOLSRobustHACSmallNew(TestOLSRobust1, CheckOLSRobustNewMixin):
# compare with ivreg robust small
def setup(self):
res_ols = self.res1.get_robustcov_results('HAC', maxlags=4,
use_correction=True, use_t=True)
self.res3 = self.res1
self.res1 = res_ols
self.bse_robust = res_ols.bse
self.cov_robust = res_ols.cov_params()
cov1 = sw.cov_hac_simple(res_ols, nlags=4, use_correction=True)
se1 = sw.se_cov(cov1)
self.bse_robust2 = se1
self.cov_robust2 = cov1
self.small = True
self.res2 = res.results_ivhac4_small
class TestOLSRobust2LargeNew(TestOLSRobust1, CheckOLSRobustNewMixin):
# compare with ivreg robust small
def setup(self):
res_ols = self.res1.get_robustcov_results('HC0')
res_ols.use_t = False
self.res3 = self.res1
self.res1 = res_ols
self.bse_robust = res_ols.bse
self.cov_robust = res_ols.cov_params()
self.bse_robust2 = res_ols.HC0_se
self.cov_robust2 = res_ols.cov_HC0
self.small = False
self.res2 = res.results_ivhc0_large
# TODO: skipping next two for now, not refactored yet for `large`
def test_fvalue(self):
pass
def test_confint(self):
pass
#######################################################
# cluster robust standard errors
#######################################################
class CheckOLSRobustCluster(CheckOLSRobust):
# compare with regress robust
@classmethod
def setup_class(cls):
#import pandas as pa
from statsmodels.datasets import grunfeld
dtapa = grunfeld.data.load_pandas()
#Stata example/data seems to miss last firm
dtapa_endog = dtapa.endog[:200]
dtapa_exog = dtapa.exog[:200]
exog = add_constant(dtapa_exog[['value', 'capital']], prepend=False)
#asserts don't work for pandas
cls.res1 = OLS(dtapa_endog, exog).fit()
firm_names, firm_id = np.unique(np.asarray(dtapa_exog[['firm']], 'S20'),
return_inverse=True)
cls.groups = firm_id
#time indicator in range(max Ti)
time = np.asarray(dtapa_exog[['year']])
time -= time.min()
cls.time = np.squeeze(time).astype(int)
# nw_panel function requires interval bounds
cls.tidx = [(i*20, 20*(i+1)) for i in range(10)]
class TestOLSRobustCluster2(CheckOLSRobustCluster, CheckOLSRobustNewMixin):
# compare with `reg cluster`
def setup(self):
res_ols = self.res1.get_robustcov_results('cluster',
groups=self.groups,
use_correction=True,
use_t=True)
self.res3 = self.res1
self.res1 = res_ols
self.bse_robust = res_ols.bse
self.cov_robust = res_ols.cov_params()
cov1 = sw.cov_cluster(self.res1, self.groups, use_correction=True)
se1 = sw.se_cov(cov1)
self.bse_robust2 = se1
self.cov_robust2 = cov1
self.small = True
self.res2 = res2.results_cluster
self.rtol = 1e-6
self.rtolh = 1e-10
class TestOLSRobustCluster2Input(CheckOLSRobustCluster, CheckOLSRobustNewMixin):
# compare with `reg cluster`
def setup(self):
import pandas as pd
fat_array = self.groups.reshape(-1, 1)
fat_groups = pd.DataFrame(fat_array)
res_ols = self.res1.get_robustcov_results('cluster',
groups=fat_groups,
use_correction=True,
use_t=True)
self.res3 = self.res1
self.res1 = res_ols
self.bse_robust = res_ols.bse
self.cov_robust = res_ols.cov_params()
cov1 = sw.cov_cluster(self.res1, self.groups, use_correction=True)
se1 = sw.se_cov(cov1)
self.bse_robust2 = se1
self.cov_robust2 = cov1
self.small = True
self.res2 = res2.results_cluster
self.rtol = 1e-6
self.rtolh = 1e-10
def test_too_many_groups(self):
long_groups = self.groups.reshape(-1, 1)
groups3 = np.hstack((long_groups, long_groups, long_groups))
assert_raises(ValueError, self.res1.get_robustcov_results,'cluster',
groups=groups3, use_correction=True, use_t=True)
def test_2way_dataframe(self):
import pandas as pd
long_groups = self.groups.reshape(-1, 1)
groups2 = pd.DataFrame(np.hstack((long_groups, long_groups)))
res = self.res1.get_robustcov_results(
'cluster', groups=groups2, use_correction=True, use_t=True)
class TestOLSRobustCluster2Fit(CheckOLSRobustCluster, CheckOLSRobustNewMixin):
# copy, past uses fit method
# compare with `reg cluster`
def setup(self):
res_ols = self.res1.model.fit(cov_type='cluster',
cov_kwds=dict(
groups=self.groups,
use_correction=True,
use_t=True))
self.res3 = self.res1
self.res1 = res_ols
self.bse_robust = res_ols.bse
self.cov_robust = res_ols.cov_params()
cov1 = sw.cov_cluster(self.res1, self.groups, use_correction=True)
se1 = sw.se_cov(cov1)
self.bse_robust2 = se1
self.cov_robust2 = cov1
self.small = True
self.res2 = res2.results_cluster
self.rtol = 1e-6
self.rtolh = 1e-10
def test_basic_inference(self):
res1 = self.res1
res2 = self.res2
rtol = 1e-7
assert_allclose(res1.params, res2.params, rtol=1e-8)
assert_allclose(res1.bse, res2.bse, rtol=rtol)
assert_allclose(res1.pvalues, res2.pvalues, rtol=rtol, atol=1e-20)
ci = res2.params_table[:, 4:6]
assert_allclose(res1.conf_int(), ci, rtol=5e-7, atol=1e-20)
class TestOLSRobustCluster2Large(CheckOLSRobustCluster, CheckOLSRobustNewMixin):
# compare with `reg cluster`
def setup(self):
res_ols = self.res1.get_robustcov_results('cluster',
groups=self.groups,
use_correction=False,
use_t=False,
df_correction=True)
self.res3 = self.res1
self.res1 = res_ols
self.bse_robust = res_ols.bse
self.cov_robust = res_ols.cov_params()
cov1 = sw.cov_cluster(self.res1, self.groups, use_correction=False)
se1 = sw.se_cov(cov1)
self.bse_robust2 = se1
self.cov_robust2 = cov1
self.small = False
self.res2 = res2.results_cluster_large
self.skip_f = True
self.rtol = 1e-6
self.rtolh = 1e-10
# skipping see https://github.com/statsmodels/statsmodels/pull/1189#issuecomment-29141741
def test_f_value(self):
pass
class TestOLSRobustCluster2LargeFit(CheckOLSRobustCluster, CheckOLSRobustNewMixin):
# compare with `reg cluster`
def setup(self):
model = OLS(self.res1.model.endog, self.res1.model.exog)
#res_ols = self.res1.model.fit(cov_type='cluster',
res_ols = model.fit(cov_type='cluster',
cov_kwds=dict(groups=self.groups,
use_correction=False,
use_t=False,
df_correction=True))
self.res3 = self.res1
self.res1 = res_ols
self.bse_robust = res_ols.bse
self.cov_robust = res_ols.cov_params()
cov1 = sw.cov_cluster(self.res1, self.groups, use_correction=False)
se1 = sw.se_cov(cov1)
self.bse_robust2 = se1
self.cov_robust2 = cov1
self.small = False
self.res2 = res2.results_cluster_large
self.skip_f = True
self.rtol = 1e-6
self.rtolh = 1e-10
# skipping see https://github.com/statsmodels/statsmodels/pull/1189#issuecomment-29141741
def t_est_fvalue(self):
pass
class TestOLSRobustClusterGS(CheckOLSRobustCluster, CheckOLSRobustNewMixin):
# compare with `reg cluster`
def setup(self):
res_ols = self.res1.get_robustcov_results('nw-groupsum',
time=self.time,
maxlags=4,
use_correction=False,
use_t=True)
self.res3 = self.res1
self.res1 = res_ols
self.bse_robust = res_ols.bse
self.cov_robust = res_ols.cov_params()
cov1 = sw.cov_nw_groupsum(self.res1, 4, self.time, use_correction=False)
se1 = sw.se_cov(cov1)
self.bse_robust2 = se1
self.cov_robust2 = cov1
self.small = True
self.res2 = res2.results_nw_groupsum4
self.skip_f = True
self.rtol = 1e-6
self.rtolh = 1e-10
class TestOLSRobustClusterGSFit(CheckOLSRobustCluster, CheckOLSRobustNewMixin):
# compare with `reg cluster`
def setup(self):
res_ols = self.res1.model.fit(cov_type='nw-groupsum',
cov_kwds=dict(time=self.time,
maxlags=4,
use_correction=False,
use_t=True))
self.res3 = self.res1
self.res1 = res_ols
self.bse_robust = res_ols.bse
self.cov_robust = res_ols.cov_params()
cov1 = sw.cov_nw_groupsum(self.res1, 4, self.time, use_correction=False)
se1 = sw.se_cov(cov1)
self.bse_robust2 = se1
self.cov_robust2 = cov1
self.small = True
self.res2 = res2.results_nw_groupsum4
self.skip_f = True
self.rtol = 1e-6
self.rtolh = 1e-10
class TestOLSRobustClusterNWP(CheckOLSRobustCluster, CheckOLSRobustNewMixin):
# compare with `reg cluster`
def setup(self):
res_ols = self.res1.get_robustcov_results('nw-panel',
time=self.time,
maxlags=4,
use_correction='hac',
use_t=True,
df_correction=False)
self.res3 = self.res1
self.res1 = res_ols
self.bse_robust = res_ols.bse
self.cov_robust = res_ols.cov_params()
cov1 = sw.cov_nw_panel(self.res1, 4, self.tidx)
se1 = sw.se_cov(cov1)
self.bse_robust2 = se1
self.cov_robust2 = cov1
self.small = True
self.res2 = res2.results_nw_panel4
self.skip_f = True
self.rtol = 1e-6
self.rtolh = 1e-10
# TODO: low precision/agreement
class TestOLSRobustCluster2G(CheckOLSRobustCluster, CheckOLSRobustNewMixin):
# compare with `reg cluster`
def setup(self):
res_ols = self.res1.get_robustcov_results('cluster',
groups=(self.groups, self.time),
use_correction=True,
use_t=True)
self.res3 = self.res1
self.res1 = res_ols
self.bse_robust = res_ols.bse
self.cov_robust = res_ols.cov_params()
cov1 = sw.cov_cluster_2groups(self.res1, self.groups, group2=self.time,
use_correction=True)[0]
se1 = sw.se_cov(cov1)
self.bse_robust2 = se1
self.cov_robust2 = cov1
self.small = True
self.res2 = res2.results_cluster_2groups_small
self.rtol = 0.35 # only f_pvalue and confint for constant differ >rtol=0.05
self.rtolh = 1e-10
class TestOLSRobustCluster2GLarge(CheckOLSRobustCluster, CheckOLSRobustNewMixin):
# compare with `reg cluster`
def setup(self):
res_ols = self.res1.get_robustcov_results('cluster',
groups=(self.groups, self.time),
use_correction=False, #True,
use_t=False)
self.res3 = self.res1
self.res1 = res_ols
self.bse_robust = res_ols.bse
self.cov_robust = res_ols.cov_params()
cov1 = sw.cov_cluster_2groups(self.res1, self.groups, group2=self.time,
use_correction=False)[0]
se1 = sw.se_cov(cov1)
self.bse_robust2 = se1
self.cov_robust2 = cov1
self.small = False
self.res2 = res2.results_cluster_2groups_large
self.skip_f = True
self.rtol = 1e-7
self.rtolh = 1e-10
######################################
# WLS
######################################
class CheckWLSRobustCluster(CheckOLSRobust):
# compare with regress robust
@classmethod
def setup_class(cls):
#import pandas as pa
from statsmodels.datasets import grunfeld
dtapa = grunfeld.data.load_pandas()
#Stata example/data seems to miss last firm
dtapa_endog = dtapa.endog[:200]
dtapa_exog = dtapa.exog[:200]
exog = add_constant(dtapa_exog[['value', 'capital']], prepend=False)
#asserts don't work for pandas
cls.res1 = WLS(dtapa_endog, exog, weights=1/dtapa_exog['value']).fit()
firm_names, firm_id = np.unique(np.asarray(dtapa_exog[['firm']], 'S20'),
return_inverse=True)
cls.groups = firm_id
#time indicator in range(max Ti)
time = np.asarray(dtapa_exog[['year']])
time -= time.min()
cls.time = np.squeeze(time).astype(int)
# nw_panel function requires interval bounds
cls.tidx = [(i*20, 20*(i+1)) for i in range(10)]
# not available yet for WLS
class TestWLSRobustCluster2(CheckWLSRobustCluster, CheckOLSRobustNewMixin):
# compare with `reg cluster`
def setup(self):
res_ols = self.res1.get_robustcov_results('cluster',
groups=self.groups,
use_correction=True,
use_t=True)
self.res3 = self.res1
self.res1 = res_ols
self.bse_robust = res_ols.bse
self.cov_robust = res_ols.cov_params()
cov1 = sw.cov_cluster(self.res1, self.groups, use_correction=True)
se1 = sw.se_cov(cov1)
self.bse_robust2 = se1
self.cov_robust2 = cov1
self.small = True
self.res2 = res2.results_cluster_wls_small
self.rtol = 1e-6
self.rtolh = 1e-10
# not available yet for WLS
class TestWLSRobustCluster2Large(CheckWLSRobustCluster, CheckOLSRobustNewMixin):
# compare with `reg cluster`
def setup(self):
res_ols = self.res1.get_robustcov_results('cluster',
groups=self.groups,
use_correction=False,
use_t=False,
df_correction=True)
self.res3 = self.res1
self.res1 = res_ols
self.bse_robust = res_ols.bse
self.cov_robust = res_ols.cov_params()
cov1 = sw.cov_cluster(self.res1, self.groups, use_correction=False)
se1 = sw.se_cov(cov1)
self.bse_robust2 = se1
self.cov_robust2 = cov1
self.small = False
self.res2 = res2.results_cluster_wls_large
self.skip_f = True
self.rtol = 1e-6
self.rtolh = 1e-10
class TestWLSRobustSmall(CheckWLSRobustCluster, CheckOLSRobustNewMixin):
# compare with `reg cluster`
def setup(self):
res_ols = self.res1.get_robustcov_results('HC1',
use_t=True)
self.res3 = self.res1
self.res1 = res_ols
self.bse_robust = res_ols.bse
self.cov_robust = res_ols.cov_params()
#TODO: check standalone function
#cov1 = sw.cov_cluster(self.res1, self.groups, use_correction=False)
cov1 = res_ols.cov_HC1
se1 = sw.se_cov(cov1)
self.bse_robust2 = se1
self.cov_robust2 = cov1
self.small = True
self.res2 = res2.results_hc1_wls_small
self.skip_f = True
self.rtol = 1e-6
self.rtolh = 1e-10
class TestWLSOLSRobustSmall(object):
@classmethod
def setup_class(cls):
#import pandas as pa
from statsmodels.datasets import grunfeld
dtapa = grunfeld.data.load_pandas()
#Stata example/data seems to miss last firm
dtapa_endog = dtapa.endog[:200]
dtapa_exog = dtapa.exog[:200]
exog = add_constant(dtapa_exog[['value', 'capital']], prepend=False)
#asserts don't work for pandas
cls.res_wls = WLS(dtapa_endog, exog, weights=1/dtapa_exog['value']).fit()
w_sqrt = 1 / np.sqrt(np.asarray(dtapa_exog['value']))
cls.res_ols = OLS(dtapa_endog * w_sqrt,
np.asarray(exog) * w_sqrt[:, None]).fit() # hasconst=True ?
firm_names, firm_id = np.unique(np.asarray(dtapa_exog[['firm']], 'S20'),
return_inverse=True)
cls.groups = firm_id
#time indicator in range(max Ti)
time = np.asarray(dtapa_exog[['year']])
time -= time.min()
cls.time = np.squeeze(time).astype(int)
# nw_panel function requires interval bounds
cls.tidx = [(i*20, 20*(i+1)) for i in range(10)]
def test_all(self):
all_cov = [('HC0', dict(use_t=True)),
('HC1', dict(use_t=True)),
('HC2', dict(use_t=True)),
('HC3', dict(use_t=True))]
# fvalue are not the same, see #1212
#res_ols = self.res_ols
#res_wls = self.res_wls
#assert_allclose(res_ols.fvalue, res_wls.fvalue, rtol=1e-13)
#assert_allclose(res_ols.f_pvalue, res_wls.f_pvalue, rtol=1e-13)
for cov_type, kwds in all_cov:
res1 = self.res_ols.get_robustcov_results(cov_type, **kwds)
res2 = self.res_wls.get_robustcov_results(cov_type, **kwds)
assert_allclose(res1.params, res2.params, rtol=1e-13)
assert_allclose(res1.cov_params(), res2.cov_params(), rtol=1e-13)
assert_allclose(res1.bse, res2.bse, rtol=1e-13)
assert_allclose(res1.pvalues, res2.pvalues, rtol=1e-13)
#Note: Fvalue doesn't match up, difference in calculation ?
# The only difference should be in the constant detection
#assert_allclose(res1.fvalue, res2.fvalue, rtol=1e-13)
#assert_allclose(res1.f_pvalue, res2.f_pvalue, rtol=1e-13)
mat = np.eye(len(res1.params))
ft1 = res1.f_test(mat)
ft2 = res2.f_test(mat)
assert_allclose(ft1.fvalue, ft2.fvalue, rtol=1e-13)
assert_allclose(ft1.pvalue, ft2.pvalue, rtol=1e-12)
def test_fixed_scale(self):
cov_type = 'fixed_scale'
kwds = {}
res1 = self.res_ols.get_robustcov_results(cov_type, **kwds)
res2 = self.res_wls.get_robustcov_results(cov_type, **kwds)
assert_allclose(res1.params, res2.params, rtol=1e-13)
assert_allclose(res1.cov_params(), res2.cov_params(), rtol=1e-13)
assert_allclose(res1.bse, res2.bse, rtol=1e-13)
assert_allclose(res1.pvalues, res2.pvalues, rtol=1e-12)
tt = res2.t_test(np.eye(len(res2.params)),
cov_p=res2.normalized_cov_params)
assert_allclose(res2.cov_params(), res2.normalized_cov_params,
rtol=1e-13)
assert_allclose(res2.bse, tt.sd, rtol=1e-13)
assert_allclose(res2.pvalues, tt.pvalue, rtol=1e-13)
assert_allclose(res2.tvalues, tt.tvalue, rtol=1e-13)
# using cov_type in fit
mod = self.res_wls.model
mod3 = WLS(mod.endog, mod.exog, weights=mod.weights)
res3 = mod3.fit(cov_type=cov_type, cov_kwds=kwds)
tt = res3.t_test(np.eye(len(res3.params)),
cov_p=res3.normalized_cov_params)
assert_allclose(res3.cov_params(), res3.normalized_cov_params,
rtol=1e-13)
assert_allclose(res3.bse, tt.sd, rtol=1e-13)
assert_allclose(res3.pvalues, tt.pvalue, rtol=1e-13)
assert_allclose(res3.tvalues, tt.tvalue, rtol=1e-13)
def test_cov_type_fixed_scale():
# this is a unit test from scipy curvefit for `absolute_sigma` keyword
xdata = np.array([0, 1, 2, 3, 4, 5])
ydata = np.array([1, 1, 5, 7, 8, 12])
sigma = np.array([1, 2, 1, 2, 1, 2])
xdata = np.column_stack((xdata, np.ones(len(xdata))))
weights = 1. / sigma**2
res = WLS(ydata, xdata, weights=weights).fit()
assert_allclose(res.bse, [0.20659803, 0.57204404], rtol=1e-3)
res = WLS(ydata, xdata, weights=weights).fit()
assert_allclose(res.bse, [0.20659803, 0.57204404], rtol=1e-3)
res = WLS(ydata, xdata, weights=weights).fit(cov_type='fixed scale')
assert_allclose(res.bse, [0.30714756, 0.85045308], rtol=1e-3)
res = WLS(ydata, xdata, weights=weights / 9.).fit(cov_type='fixed scale')
assert_allclose(res.bse, [3*0.30714756, 3*0.85045308], rtol=1e-3)
res = WLS(ydata, xdata, weights=weights).fit(cov_type='fixed scale',
cov_kwds={'scale':9})
assert_allclose(res.bse, [3*0.30714756, 3*0.85045308], rtol=1e-3)
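# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original test module): the fixed-scale
# covariance exercised above mirrors scipy's `absolute_sigma` behaviour in
# curve_fit.  A minimal side-by-side sketch, assuming scipy >= 0.14 is
# available; the helper is deliberately not named `test_*` so test runners
# ignore it.
def _curve_fit_absolute_sigma_sketch():
    from scipy.optimize import curve_fit
    xdata = np.array([0, 1, 2, 3, 4, 5], dtype=float)
    ydata = np.array([1, 1, 5, 7, 8, 12], dtype=float)
    sigma = np.array([1, 2, 1, 2, 1, 2], dtype=float)
    # slope/intercept parameterization matches the (x, constant) design above
    popt, pcov = curve_fit(lambda x, a, b: a * x + b, xdata, ydata,
                           sigma=sigma, absolute_sigma=True)
    # standard errors comparable to the 'fixed scale' bse asserted above,
    # roughly [0.307, 0.850]
    return popt, np.sqrt(np.diag(pcov))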
| bsd-3-clause |
has2k1/plotnine | plotnine/stats/stat_density.py | 1 | 7516 | from contextlib import suppress
from warnings import warn
import numpy as np
import pandas as pd
import statsmodels.api as sm
from scipy.stats import iqr
from ..mapping.evaluation import after_stat
from ..doctools import document
from ..exceptions import PlotnineError, PlotnineWarning
from .stat import stat
# NOTE: Parameter descriptions are in
# statsmodels/nonparametric/kde.py
@document
class stat_density(stat):
"""
Compute density estimate
{usage}
Parameters
----------
{common_parameters}
kernel : str, optional (default: 'gaussian')
Kernel used for density estimation. One of::
'biweight'
'cosine'
'cosine2'
'epanechnikov'
'gaussian'
'triangular'
'triweight'
'uniform'
adjust : float, optional (default: 1)
        An adjustment factor for the ``bw``. Bandwidth becomes
        :py:`bw * adjust`.
trim : bool, optional (default: False)
This parameter only matters if you are displaying multiple
densities in one plot. If :py:`False`, the default, each
density is computed on the full range of the data. If
:py:`True`, each density is computed over the range of that
group; this typically means the estimated x values will not
line-up, and hence you won't be able to stack density values.
n : int, optional(default: 1024)
Number of equally spaced points at which the density is to
be estimated. For efficient computation, it should be a power
of two.
gridsize : int, optional (default: None)
If gridsize is :py:`None`, :py:`max(len(x), 50)` is used.
bw : str or float, optional (default: 'nrd0')
The bandwidth to use, If a float is given, it is the bandwidth.
The :py:`str` choices are::
'nrd0'
'normal_reference'
'scott'
'silverman'
        ``nrd0`` is a port of ``stats::bw.nrd0`` in R; it is equivalent
to ``silverman`` when there is more than 1 value in a group.
cut : float, optional (default: 3)
Defines the length of the grid past the lowest and highest
values of ``x`` so that the kernel goes to zero. The end points
are ``-/+ cut*bw*{min(x) or max(x)}``.
clip : tuple, optional (default: (-np.inf, np.inf))
Values in ``x`` that are outside of the range given by clip are
dropped. The number of values in ``x`` is then shortened.
See Also
--------
plotnine.geoms.geom_density
statsmodels.nonparametric.kde.KDEUnivariate
statsmodels.nonparametric.kde.KDEUnivariate.fit
"""
_aesthetics_doc = """
{aesthetics_table}
.. rubric:: Options for computed aesthetics
::
'density' # density estimate
'count' # density * number of points,
# useful for stacked density plots
'scaled' # density estimate, scaled to maximum of 1
"""
REQUIRED_AES = {'x'}
DEFAULT_PARAMS = {'geom': 'density', 'position': 'stack',
'na_rm': False,
'kernel': 'gaussian', 'adjust': 1,
'trim': False, 'n': 1024, 'gridsize': None,
'bw': 'nrd0', 'cut': 3,
'clip': (-np.inf, np.inf)}
DEFAULT_AES = {'y': after_stat('density')}
CREATES = {'density', 'count', 'scaled', 'n'}
def setup_params(self, data):
params = self.params.copy()
lookup = {
'biweight': 'biw',
'cosine': 'cos',
'cosine2': 'cos2',
'epanechnikov': 'epa',
'gaussian': 'gau',
'triangular': 'tri',
'triweight': 'triw',
'uniform': 'uni'}
with suppress(KeyError):
params['kernel'] = lookup[params['kernel'].lower()]
if params['kernel'] not in lookup.values():
msg = ("kernel should be one of {}. "
"You may use the abbreviations {}")
raise PlotnineError(msg.format(lookup.keys(),
lookup.values()))
return params
@classmethod
def compute_group(cls, data, scales, **params):
weight = data.get('weight')
if params['trim']:
range_x = data['x'].min(), data['x'].max()
else:
range_x = scales.x.dimension()
return compute_density(data['x'], weight, range_x, **params)
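# Editorial sketch (not part of plotnine): how the kernel/adjust/trim options
# documented above are typically supplied through the user-facing API.  The
# dataframe `df` and the column name 'x' are placeholders for illustration.
def _stat_density_usage_sketch(df):
    from plotnine import ggplot, aes, geom_density
    # geom_density uses stat_density under the hood and forwards these params
    return (ggplot(df, aes(x='x'))
            + geom_density(kernel='epanechnikov', adjust=0.5, trim=True))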
def compute_density(x, weight, range, **params):
x = np.asarray(x, dtype=float)
not_nan = ~np.isnan(x)
x = x[not_nan]
bw = params['bw']
kernel = params['kernel']
n = len(x)
if n == 0 or (n == 1 and isinstance(bw, str)):
if n == 1:
warn("To compute the density of a group with only one "
"value set the bandwidth manually. e.g `bw=0.1`",
PlotnineWarning)
warn("Groups with fewer than 2 data points have been removed.",
PlotnineWarning)
return pd.DataFrame()
# kde is computed efficiently using fft. But the fft does
# not support weights and is only available with the
# gaussian kernel. When weights are relevant we
# turn off the fft.
if weight is None:
if kernel != 'gau':
weight = np.ones(n) / n
else:
weight = np.asarray(weight, dtype=float)
if kernel == 'gau' and weight is None:
fft = True
else:
fft = False
if bw == 'nrd0':
bw = nrd0(x)
kde = sm.nonparametric.KDEUnivariate(x)
kde.fit(
kernel=kernel,
bw=bw,
fft=fft,
weights=weight,
adjust=params['adjust'],
cut=params['cut'],
gridsize=params['gridsize'],
clip=params['clip']
)
x2 = np.linspace(range[0], range[1], params['n'])
try:
y = kde.evaluate(x2)
if np.isscalar(y) and np.isnan(y):
raise ValueError('kde.evaluate returned nan')
except ValueError:
y = []
for _x in x2:
result = kde.evaluate(_x)
try:
y.append(result[0])
except TypeError:
y.append(result)
y = np.asarray(y)
# Evaluations outside the kernel domain return np.nan,
# these values and corresponding x2s are dropped.
# The kernel domain is defined by the values in x, but
# the evaluated values in x2 could have a much wider range.
not_nan = ~np.isnan(y)
x2 = x2[not_nan]
y = y[not_nan]
return pd.DataFrame({'x': x2,
'density': y,
'scaled': y / np.max(y) if len(y) else [],
'count': y * n,
'n': n})
def nrd0(x):
"""
Port of R stats::bw.nrd0
This is equivalent to statsmodels silverman when x has more than
1 unique value. It can never give a zero bandwidth.
Parameters
----------
x : array_like
Values whose density is to be estimated
Returns
-------
out : float
Bandwidth of x
"""
n = len(x)
if n < 1:
raise ValueError(
"Need at leat 2 data points to compute the nrd0 bandwidth."
)
std = np.std(x, ddof=1)
std_estimate = iqr(x)/1.349
low_std = np.min((std, std_estimate))
if low_std == 0:
low_std = std_estimate or np.abs(np.asarray(x)[0]) or 1
return 0.9 * low_std * (n ** -0.2)
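# Editorial sketch (not part of plotnine): for a unit-variance sample nrd0 is
# roughly 0.9 * n**(-1/5), i.e. about 0.23 when n = 1000.
def _nrd0_example(seed=0, n=1000):
    rng = np.random.RandomState(seed)
    return nrd0(rng.normal(size=n))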
| gpl-2.0 |
hdmetor/scikit-learn | examples/missing_values.py | 233 | 3056 | """
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better results
than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via cross-validation.
Sometimes dropping rows or using marker values is more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.cross_validation import cross_val_score
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = int(np.floor(n_samples * missing_rate))
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
| bsd-3-clause |
glouppe/scikit-learn | sklearn/tests/test_isotonic.py | 230 | 11087 | import numpy as np
import pickle
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
    # check that fit is permutation invariant.
# regression test of missing sorting of sample-weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert_false(is_increasing)
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [0, 1, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5, 6]
y_true = [0, 1.5, 1.5, 3, 4, 5, 6]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
    # Set y and x for increasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
# check if min value is used correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert_equal(max(y1), max(y2))
assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert_true(all_predictions_finite)
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
| bsd-3-clause |
mihail911/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_cairo.py | 69 | 16706 | """
A Cairo backend for matplotlib
Author: Steve Chaplin
Cairo is a vector graphics library with cross-device output support.
Features of Cairo:
* anti-aliasing
* alpha channel
* saves image files as PNG, PostScript, PDF
http://cairographics.org
Requires (in order, all available from Cairo website):
cairo, pycairo
Naming Conventions
* classes MixedUpperCase
  * variables lowerUpper
* functions underscore_separated
"""
from __future__ import division
import os, sys, warnings, gzip
import numpy as npy
def _fn_name(): return sys._getframe(1).f_code.co_name
try:
import cairo
except ImportError:
raise ImportError("Cairo backend requires that pycairo is installed.")
_version_required = (1,2,0)
if cairo.version_info < _version_required:
raise ImportError ("Pycairo %d.%d.%d is installed\n"
"Pycairo %d.%d.%d or later is required"
% (cairo.version_info + _version_required))
backend_version = cairo.version
del _version_required
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like
from matplotlib.figure import Figure
from matplotlib.mathtext import MathTextParser
from matplotlib.path import Path
from matplotlib.transforms import Bbox, Affine2D
from matplotlib.font_manager import ttfFontProperty
from matplotlib import rcParams
_debug = False
#_debug = True
# Image::color_conv(format) for draw_image()
if sys.byteorder == 'little':
BYTE_FORMAT = 0 # BGRA
else:
BYTE_FORMAT = 1 # ARGB
class RendererCairo(RendererBase):
fontweights = {
100 : cairo.FONT_WEIGHT_NORMAL,
200 : cairo.FONT_WEIGHT_NORMAL,
300 : cairo.FONT_WEIGHT_NORMAL,
400 : cairo.FONT_WEIGHT_NORMAL,
500 : cairo.FONT_WEIGHT_NORMAL,
600 : cairo.FONT_WEIGHT_BOLD,
700 : cairo.FONT_WEIGHT_BOLD,
800 : cairo.FONT_WEIGHT_BOLD,
900 : cairo.FONT_WEIGHT_BOLD,
'ultralight' : cairo.FONT_WEIGHT_NORMAL,
'light' : cairo.FONT_WEIGHT_NORMAL,
'normal' : cairo.FONT_WEIGHT_NORMAL,
'medium' : cairo.FONT_WEIGHT_NORMAL,
'semibold' : cairo.FONT_WEIGHT_BOLD,
'bold' : cairo.FONT_WEIGHT_BOLD,
'heavy' : cairo.FONT_WEIGHT_BOLD,
'ultrabold' : cairo.FONT_WEIGHT_BOLD,
'black' : cairo.FONT_WEIGHT_BOLD,
}
fontangles = {
'italic' : cairo.FONT_SLANT_ITALIC,
'normal' : cairo.FONT_SLANT_NORMAL,
'oblique' : cairo.FONT_SLANT_OBLIQUE,
}
def __init__(self, dpi):
"""
"""
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
self.dpi = dpi
self.text_ctx = cairo.Context (
cairo.ImageSurface (cairo.FORMAT_ARGB32,1,1))
self.mathtext_parser = MathTextParser('Cairo')
def set_ctx_from_surface (self, surface):
self.ctx = cairo.Context (surface)
self.ctx.save() # restore, save - when call new_gc()
def set_width_height(self, width, height):
self.width = width
self.height = height
self.matrix_flipy = cairo.Matrix (yy=-1, y0=self.height)
# use matrix_flipy for ALL rendering?
# - problem with text? - will need to switch matrix_flipy off, or do a
# font transform?
def _fill_and_stroke (self, ctx, fill_c, alpha):
if fill_c is not None:
ctx.save()
if len(fill_c) == 3:
ctx.set_source_rgba (fill_c[0], fill_c[1], fill_c[2], alpha)
else:
ctx.set_source_rgba (fill_c[0], fill_c[1], fill_c[2], alpha*fill_c[3])
ctx.fill_preserve()
ctx.restore()
ctx.stroke()
#@staticmethod
def convert_path(ctx, tpath):
for points, code in tpath.iter_segments():
if code == Path.MOVETO:
ctx.move_to(*points)
elif code == Path.LINETO:
ctx.line_to(*points)
elif code == Path.CURVE3:
ctx.curve_to(points[0], points[1],
points[0], points[1],
points[2], points[3])
elif code == Path.CURVE4:
ctx.curve_to(*points)
elif code == Path.CLOSEPOLY:
ctx.close_path()
convert_path = staticmethod(convert_path)
def draw_path(self, gc, path, transform, rgbFace=None):
if len(path.vertices) > 18980:
raise ValueError("The Cairo backend can not draw paths longer than 18980 points.")
ctx = gc.ctx
transform = transform + \
Affine2D().scale(1.0, -1.0).translate(0, self.height)
tpath = transform.transform_path(path)
ctx.new_path()
self.convert_path(ctx, tpath)
self._fill_and_stroke(ctx, rgbFace, gc.get_alpha())
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
# bbox - not currently used
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
im.flipud_out()
rows, cols, buf = im.color_conv (BYTE_FORMAT)
surface = cairo.ImageSurface.create_for_data (
buf, cairo.FORMAT_ARGB32, cols, rows, cols*4)
# function does not pass a 'gc' so use renderer.ctx
ctx = self.ctx
y = self.height - y - rows
ctx.set_source_surface (surface, x, y)
ctx.paint()
im.flipud_out()
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
# Note: x,y are device/display coords, not user-coords, unlike other
# draw_* methods
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
if ismath:
self._draw_mathtext(gc, x, y, s, prop, angle)
else:
ctx = gc.ctx
ctx.new_path()
ctx.move_to (x, y)
ctx.select_font_face (prop.get_name(),
self.fontangles [prop.get_style()],
self.fontweights[prop.get_weight()])
size = prop.get_size_in_points() * self.dpi / 72.0
ctx.save()
if angle:
ctx.rotate (-angle * npy.pi / 180)
ctx.set_font_size (size)
ctx.show_text (s.encode("utf-8"))
ctx.restore()
def _draw_mathtext(self, gc, x, y, s, prop, angle):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
ctx = gc.ctx
width, height, descent, glyphs, rects = self.mathtext_parser.parse(
s, self.dpi, prop)
ctx.save()
ctx.translate(x, y)
if angle:
ctx.rotate (-angle * npy.pi / 180)
for font, fontsize, s, ox, oy in glyphs:
ctx.new_path()
ctx.move_to(ox, oy)
fontProp = ttfFontProperty(font)
ctx.save()
ctx.select_font_face (fontProp.name,
self.fontangles [fontProp.style],
self.fontweights[fontProp.weight])
size = fontsize * self.dpi / 72.0
ctx.set_font_size(size)
ctx.show_text(s.encode("utf-8"))
ctx.restore()
for ox, oy, w, h in rects:
ctx.new_path()
ctx.rectangle (ox, oy, w, h)
ctx.set_source_rgb (0, 0, 0)
ctx.fill_preserve()
ctx.restore()
def flipy(self):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
return True
#return False # tried - all draw objects ok except text (and images?)
# which comes out mirrored!
def get_canvas_width_height(self):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
if ismath:
width, height, descent, fonts, used_characters = self.mathtext_parser.parse(
s, self.dpi, prop)
return width, height, descent
ctx = self.text_ctx
ctx.save()
ctx.select_font_face (prop.get_name(),
self.fontangles [prop.get_style()],
self.fontweights[prop.get_weight()])
# Cairo (says it) uses 1/96 inch user space units, ref: cairo_gstate.c
# but if /96.0 is used the font is too small
size = prop.get_size_in_points() * self.dpi / 72.0
# problem - scale remembers last setting and font can become
# enormous causing program to crash
# save/restore prevents the problem
ctx.set_font_size (size)
y_bearing, w, h = ctx.text_extents (s)[1:4]
ctx.restore()
return w, h, h + y_bearing
def new_gc(self):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
self.ctx.restore() # matches save() in set_ctx_from_surface()
self.ctx.save()
return GraphicsContextCairo (renderer=self)
def points_to_pixels(self, points):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
return points/72.0 * self.dpi
class GraphicsContextCairo(GraphicsContextBase):
_joind = {
'bevel' : cairo.LINE_JOIN_BEVEL,
'miter' : cairo.LINE_JOIN_MITER,
'round' : cairo.LINE_JOIN_ROUND,
}
_capd = {
'butt' : cairo.LINE_CAP_BUTT,
'projecting' : cairo.LINE_CAP_SQUARE,
'round' : cairo.LINE_CAP_ROUND,
}
def __init__(self, renderer):
GraphicsContextBase.__init__(self)
self.renderer = renderer
self.ctx = renderer.ctx
def set_alpha(self, alpha):
self._alpha = alpha
rgb = self._rgb
self.ctx.set_source_rgba (rgb[0], rgb[1], rgb[2], alpha)
#def set_antialiased(self, b):
# enable/disable anti-aliasing is not (yet) supported by Cairo
def set_capstyle(self, cs):
if cs in ('butt', 'round', 'projecting'):
self._capstyle = cs
self.ctx.set_line_cap (self._capd[cs])
else:
raise ValueError('Unrecognized cap style. Found %s' % cs)
def set_clip_rectangle(self, rectangle):
self._cliprect = rectangle
if rectangle is None:
return
x,y,w,h = rectangle.bounds
# pixel-aligned clip-regions are faster
x,y,w,h = round(x), round(y), round(w), round(h)
ctx = self.ctx
ctx.new_path()
ctx.rectangle (x, self.renderer.height - h - y, w, h)
ctx.clip ()
# Alternative: just set _cliprect here and actually set cairo clip rect
# in fill_and_stroke() inside ctx.save() ... ctx.restore()
def set_clip_path(self, path):
if path is not None:
tpath, affine = path.get_transformed_path_and_affine()
ctx = self.ctx
ctx.new_path()
affine = affine + Affine2D().scale(1.0, -1.0).translate(0.0, self.renderer.height)
tpath = affine.transform_path(tpath)
RendererCairo.convert_path(ctx, tpath)
ctx.clip()
def set_dashes(self, offset, dashes):
self._dashes = offset, dashes
        if dashes is None:
self.ctx.set_dash([], 0) # switch dashes off
else:
self.ctx.set_dash (
self.renderer.points_to_pixels (npy.asarray(dashes)), offset)
def set_foreground(self, fg, isRGB=None):
GraphicsContextBase.set_foreground(self, fg, isRGB)
if len(self._rgb) == 3:
self.ctx.set_source_rgb(*self._rgb)
else:
self.ctx.set_source_rgba(*self._rgb)
def set_graylevel(self, frac):
GraphicsContextBase.set_graylevel(self, frac)
if len(self._rgb) == 3:
self.ctx.set_source_rgb(*self._rgb)
else:
self.ctx.set_source_rgba(*self._rgb)
def set_joinstyle(self, js):
if js in ('miter', 'round', 'bevel'):
self._joinstyle = js
self.ctx.set_line_join(self._joind[js])
else:
raise ValueError('Unrecognized join style. Found %s' % js)
def set_linewidth(self, w):
self._linewidth = w
self.ctx.set_line_width (self.renderer.points_to_pixels(w))
def new_figure_manager(num, *args, **kwargs): # called by backends/__init__.py
"""
Create a new figure manager instance
"""
    if _debug: print '%s()' % _fn_name()  # module-level function, no class context
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasCairo(thisFig)
manager = FigureManagerBase(canvas, num)
return manager
class FigureCanvasCairo (FigureCanvasBase):
def print_png(self, fobj, *args, **kwargs):
width, height = self.get_width_height()
renderer = RendererCairo (self.figure.dpi)
renderer.set_width_height (width, height)
surface = cairo.ImageSurface (cairo.FORMAT_ARGB32, width, height)
renderer.set_ctx_from_surface (surface)
self.figure.draw (renderer)
surface.write_to_png (fobj)
def print_pdf(self, fobj, *args, **kwargs):
return self._save(fobj, 'pdf', *args, **kwargs)
def print_ps(self, fobj, *args, **kwargs):
return self._save(fobj, 'ps', *args, **kwargs)
def print_svg(self, fobj, *args, **kwargs):
return self._save(fobj, 'svg', *args, **kwargs)
def print_svgz(self, fobj, *args, **kwargs):
return self._save(fobj, 'svgz', *args, **kwargs)
def get_default_filetype(self):
return rcParams['cairo.format']
def _save (self, fo, format, **kwargs):
# save PDF/PS/SVG
orientation = kwargs.get('orientation', 'portrait')
dpi = 72
self.figure.dpi = dpi
w_in, h_in = self.figure.get_size_inches()
width_in_points, height_in_points = w_in * dpi, h_in * dpi
if orientation == 'landscape':
width_in_points, height_in_points = (height_in_points,
width_in_points)
if format == 'ps':
if not cairo.HAS_PS_SURFACE:
raise RuntimeError ('cairo has not been compiled with PS '
'support enabled')
surface = cairo.PSSurface (fo, width_in_points, height_in_points)
elif format == 'pdf':
if not cairo.HAS_PDF_SURFACE:
raise RuntimeError ('cairo has not been compiled with PDF '
'support enabled')
surface = cairo.PDFSurface (fo, width_in_points, height_in_points)
elif format in ('svg', 'svgz'):
if not cairo.HAS_SVG_SURFACE:
raise RuntimeError ('cairo has not been compiled with SVG '
'support enabled')
if format == 'svgz':
filename = fo
if is_string_like(fo):
fo = open(fo, 'wb')
fo = gzip.GzipFile(None, 'wb', fileobj=fo)
surface = cairo.SVGSurface (fo, width_in_points, height_in_points)
else:
warnings.warn ("unknown format: %s" % format)
return
# surface.set_dpi() can be used
renderer = RendererCairo (self.figure.dpi)
renderer.set_width_height (width_in_points, height_in_points)
renderer.set_ctx_from_surface (surface)
ctx = renderer.ctx
if orientation == 'landscape':
ctx.rotate (npy.pi/2)
ctx.translate (0, -height_in_points)
# cairo/src/cairo_ps_surface.c
# '%%Orientation: Portrait' is always written to the file header
# '%%Orientation: Landscape' would possibly cause problems
# since some printers would rotate again ?
# TODO:
# add portrait/landscape checkbox to FileChooser
self.figure.draw (renderer)
show_fig_border = False # for testing figure orientation and scaling
if show_fig_border:
ctx.new_path()
ctx.rectangle(0, 0, width_in_points, height_in_points)
ctx.set_line_width(4.0)
ctx.set_source_rgb(1,0,0)
ctx.stroke()
ctx.move_to(30,30)
ctx.select_font_face ('sans-serif')
ctx.set_font_size(20)
ctx.show_text('Origin corner')
ctx.show_page()
surface.finish()
| gpl-3.0 |
OCM-Lab-PUC/switch-chile | switch_mod/Chile/exporting.py | 1 | 14023 | # Copyright 2015 The Switch Authors. All rights reserved.
# Licensed under the Apache License, Version 2, which is in the LICENSE file.
"""
This modules writes out output tables with certain processing.
This tables are mostly useful for quick iterations when testing code.
"""
import os, time, sys
from pyomo.environ import *
from switch_mod.financials import *
from csv import reader
import matplotlib.pyplot as plt
import pandas as pd
from cycler import cycler
import switch_mod.export as export
def define_arguments(argparser):
argparser.add_argument(
"--export-marginal-costs", action='store_true', default=False,
help="Exports energy marginal costs in US$/MWh per load zone and timepoint, calculated as dual variable values from the energy balance constraint."
)
argparser.add_argument(
"--export-capacities", action='store_true', default=False,
help="Exports cummulative installed generating capacity in MW per technology per period."
)
argparser.add_argument(
"--export-tech-dispatch", action='store_true', default=False,
help="Exports dispatched capacity per generator technology in MW per timepoint."
)
argparser.add_argument(
"--export-reservoirs", action='store_true', default=False,
help="Exports final reservoir volumes in cubic meters per timepoint."
)
def define_components(mod):
#Define dual variables, so that marginal costs can be computed eventually
if not hasattr(mod, 'dual'):
mod.dual = Suffix(direction=Suffix.IMPORT)
def define_dynamic_components(mod):
#Separate the computation of Investment and Operations cost, for comparison with stochastic problem
import switch_mod.financials as fin
def calc_tp_costs_in_period(m, t):
return sum(
getattr(m, tp_cost)[t] * m.tp_weight_in_year[t]
for tp_cost in m.cost_components_tp)
def calc_annual_costs_in_period(m, p):
return sum(
getattr(m, annual_cost)[p]
for annual_cost in m.cost_components_annual)
mod.TotalInvestmentCost = Expression(rule=lambda m: sum(calc_annual_costs_in_period(m, p) * fin.uniform_series_to_present_value(
m.discount_rate, m.period_length_years[p]) * fin.future_to_present_value(
m.discount_rate, (m.period_start[p] - m.base_financial_year)) for p in m.PERIODS))
mod.TotalOperationsCost = Expression(rule=lambda m: sum(m.SystemCostPerPeriod[p] for p in m.PERIODS) - m.TotalInvestmentCost)
def post_solve(instance, outdir):
summaries_dir = os.path.join(outdir,"Summaries")
if not os.path.exists(summaries_dir):
os.makedirs(summaries_dir)
print "\nStarting to print summaries"
start=time.time()
if instance.options.export_marginal_costs:
"""
This table writes out the marginal costs of supplying energy in each timepoint in US$/MWh.
"""
print "marginal_costs_lz_tp.csv..."
export.write_table(
instance, instance.TIMEPOINTS, instance.LOAD_ZONES,
output_file=os.path.join(summaries_dir, "marginal_costs_lz_tp.csv"),
headings=("timepoint","load_zones","marginal_cost"),
values=lambda m, tp, lz: (m.tp_timestamp[tp], lz, m.dual[m.Energy_Balance[lz, tp]] / (m.tp_weight_in_year[tp] * uniform_series_to_present_value(
m.discount_rate, m.period_length_years[m.tp_period[tp]]) * future_to_present_value(
m.discount_rate, (m.period_start[m.tp_period[tp]] - m.base_financial_year)))
))
df = pd.read_csv('outputs/Summaries/marginal_costs_lz_tp.csv',sep='\t')
lz_dfs = []
for lz in instance.LOAD_ZONES:
lz_dfs.append(df[df.load_zones == lz].drop(['load_zones','timepoint'],axis=1).reset_index(drop=True))
lz_dfs[-1].columns = [lz]
DF = pd.concat(lz_dfs, axis=1)
fig = plt.figure(1)
mc_ax = fig.add_subplot(211)
# GO cycling through the rainbow to get line colours
cm = plt.get_cmap('gist_rainbow')
# You have to play with the color map and the line style list to get enough combinations for your particular plot
mc_ax.set_prop_cycle(cycler('linestyle',['-',':','--','-.']) * cycler('color',[cm(i/5.0) for i in range(0,6)]))
        # to locate the legend: "loc" is the point of the legend for which you will specify coordinates. These coords are specified in bbox_to_anchor (can be a single point or a pair)
mc_plot = DF.plot(ax=mc_ax,linewidth=1.5).legend(loc='upper center', fontsize=10, bbox_to_anchor=(0.,-0.15,1.,-0.15), ncol=3, mode="expand")
plt.xticks([i*24 for i in range(1,len(instance.TIMEPOINTS)/24+1)],[instance.tp_timestamp[instance.TIMEPOINTS[i*24]] for i in range(1,len(instance.TIMEPOINTS)/24+1)],rotation=40,fontsize=7)
plt.savefig('outputs/Summaries/marginal_costs.pdf',bbox_extra_artists=(mc_plot,))
"""
This table writes out the fuel consumption in MMBTU per hour.
"""
# print "energy_produced_in_period_by_each_project.csv..."
# export.write_table(
# instance, instance.PERIODS, instance.PROJECTS,
# output_file=os.path.join(summaries_dir, "energy_produced_in_period_by_each_project.csv"),
# headings=("period", "project", "energy_produced_GWh"),
# values=lambda m, p, proj: (p, proj,) + tuple(
# sum(m.DispatchProj[proj,tp]*m.tp_weight[tp] for tp in m.PERIOD_TPS[p])/1000)
# )
# """
# This table writes out the fuel consumption in MMBTU per hour.
# """
# print "fuel_consumption_tp_hourly.csv..."
# export.write_table(
# instance, instance.TIMEPOINTS,
# output_file=os.path.join(summaries_dir, "fuel_consumption_tp_hourly.csv"),
# headings=("timepoint",) + tuple(f for f in instance.FUELS),
# values=lambda m, tp: (m.tp_timestamp[tp],) + tuple(
# sum(m.ProjFuelUseRate[proj, t, f] for (proj,t) in m.PROJ_WITH_FUEL_DISPATCH_POINTS
# if m.g_energy_source[m.proj_gen_tech[proj]] == f and t == tp)
# for f in m.FUELS)
# )
# """
# This table writes out the fuel consumption in total MMBTU consumed in each period.
# """
# print "fuel_consumption_periods_total.csv..."
# export.write_table(
# instance, instance.PERIODS,
# output_file=os.path.join(summaries_dir, "fuel_consumption_periods_total.csv"),
# headings=("period",) + tuple(f for f in instance.FUELS),
# values=lambda m, p: (p,) + tuple(
# sum(m.ProjFuelUseRate[proj, tp, f] * m.tp_weight[tp] for (proj, tp) in m.PROJ_WITH_FUEL_DISPATCH_POINTS
# if tp in m.PERIOD_TPS[p] and m.g_energy_source[m.proj_gen_tech[proj]] == f)
# for f in m.FUELS)
# )
if instance.options.export_capacities:
"""
This table writes out the capacity that it available in each period
by technology.
"""
print "build_proj_by_tech_p.csv..."
export.write_table(
instance, instance.GENERATION_TECHNOLOGIES,
output_file=os.path.join(summaries_dir, "build_proj_by_tech_p.csv"),
headings=("gentech","Legacy") + tuple(p for p in instance.PERIODS),
values=lambda m, g: (g, sum(m.BuildProj[proj, bldyr] for (proj, bldyr) in m.PROJECT_BUILDYEARS
if m.proj_gen_tech[proj] == g and bldyr not in m.PERIODS)) + tuple(
sum(m.ProjCapacity[proj, p] for proj in m.PROJECTS if m.proj_gen_tech[proj] == g)
for p in m.PERIODS)
)
DF = pd.read_csv('outputs/Summaries/build_proj_by_tech_p.csv',sep='\t').transpose()
DF.columns = DF.iloc[0]
DF=DF.drop('gentech')
fig = plt.figure(2)
tech_ax = fig.add_subplot(211)
# GO cycling through the rainbow to get line colours
cm = plt.get_cmap('gist_rainbow')
# You have to play with the color map and the line style list to get enough combinations for your particular plot
tech_ax.set_prop_cycle(cycler('color',[cm(i/7.0) for i in range(0,8)]))
        # to locate the legend: "loc" is the point of the legend for which you will specify coordinates. These coords are specified in bbox_to_anchor (can be a single point or a pair)
tech_plot = DF.plot(ax=tech_ax,kind='bar').legend(loc='upper center', fontsize=10, bbox_to_anchor=(0.,-0.07,1.,-0.07), ncol=2, mode="expand")
plt.xticks(rotation=0,fontsize=12)
plt.savefig('outputs/Summaries/gentech_capacities.pdf',bbox_extra_artists=(tech_plot,))
if instance.options.export_tech_dispatch:
"""
This table writes out the aggregated dispatch of each gen tech on each timepoint.
"""
print "dispatch_proj_by_tech_tp.csv..."
export.write_table(
instance, instance.TIMEPOINTS,
output_file=os.path.join(summaries_dir, "dispatch_proj_by_tech_tp.csv"),
headings=("gentech",) + tuple(g for g in instance.GENERATION_TECHNOLOGIES) + ("total",),
values=lambda m, tp: (m.tp_timestamp[tp],) + tuple(
sum(m.DispatchProj[proj, t] for (proj, t) in m.PROJ_DISPATCH_POINTS
if m.proj_gen_tech[proj] == g and t == tp)
for g in m.GENERATION_TECHNOLOGIES) + (
sum(m.DispatchProj[proj, t] for (proj, t) in m.PROJ_DISPATCH_POINTS if t == tp),)
)
DF = pd.read_csv('outputs/Summaries/dispatch_proj_by_tech_tp.csv',sep='\t').drop(['gentech'],axis=1)
fig = plt.figure(3)
dis_ax = fig.add_subplot(211)
# GO cycling through the rainbow to get line colours
cm = plt.get_cmap('gist_rainbow')
# You have to play with the color map and the line style list to get enough combinations for your particular plot
dis_ax.set_prop_cycle(cycler('linestyle',['-','--',':']) * cycler('color',[cm(i/5.0) for i in range(0,6)]))
        # to locate the legend: "loc" is the point of the legend for which you will specify coordinates. These coords are specified in bbox_to_anchor (can be a single point or a pair)
dis_plot = DF.plot(ax=dis_ax,linewidth=1.5).legend(loc='upper center', fontsize=10, bbox_to_anchor=(0.,-0.15,1.,-0.15), ncol=2, mode="expand")
plt.xticks([i*5 for i in range(1,len(instance.TIMEPOINTS)/5+1)],[instance.tp_timestamp[instance.TIMEPOINTS[i*5]] for i in range(1,len(instance.TIMEPOINTS)/5+1)],rotation=40,fontsize=7)
plt.savefig('outputs/Summaries/gentech_dispatch.pdf',bbox_extra_artists=(dis_plot,))
if instance.options.export_reservoirs:
"""
This table writes out reservoir levels in cubic meters per tp.
"""
print "reservoir_final_vols_tp.csv..."
export.write_table(
instance, instance.TIMEPOINTS,
output_file=os.path.join(summaries_dir, "reservoir_final_vols_tp.csv"),
headings=("timepoints",) + tuple(r for r in instance.RESERVOIRS) + ("total",),
values=lambda m, tp: (m.tp_timestamp[tp],) + tuple(m.ReservoirFinalvol[r, tp] - m.initial_res_vol[r] for r in m.RESERVOIRS) + (
sum(m.ReservoirFinalvol[r, tp] - m.initial_res_vol[r] for r in m.RESERVOIRS),)
)
DF = pd.read_csv('outputs/Summaries/reservoir_final_vols_tp.csv',sep='\t').drop(['timepoints'],axis=1)
fig2 = plt.figure(4)
res_ax = fig2.add_subplot(211)
# GO cycling through the rainbow to get line colours
cm = plt.get_cmap('gist_rainbow')
# You have to play with the color map and the line style list to get enough combinations for your particular plot
res_ax.set_prop_cycle(cycler('linestyle',['-',':','--']) * cycler('color',[cm(i/5.0) for i in range(0,6)]))
# to locate the legend: "loc" is the point of the legend for which you will specify cooridnates. These coords are specified in bbox_to_anchor (can be only 1 point or couple)
res_plot = DF.plot(ax=res_ax,linewidth=1.5).legend(loc='upper center', fontsize=10, bbox_to_anchor=(0.,-0.15,1.,-0.15), ncol=2, mode="expand")
plt.xticks([i*24 for i in range(1,len(instance.TIMEPOINTS)/24+1)],[instance.tp_timestamp[instance.TIMEPOINTS[i*24]] for i in range(1,len(instance.TIMEPOINTS)/24+1)],rotation=40,fontsize=7)
plt.savefig('outputs/Summaries/reservoir_levels.pdf',bbox_extra_artists=(res_plot,))
"""
Writing Objective Function value.
"""
print "total_system_costs.txt..."
with open(os.path.join(summaries_dir, "total_system_costs.txt"),'w+') as f:
f.write("Total System Costs: "+str(instance.SystemCost())+"\n")
f.write("Total Investment Costs: "+str(instance.TotalInvestmentCost())+"\n")
f.write("Total Operations Costs: "+str(instance.TotalOperationsCost()))
# # This table writes out the dispatch of each gen tech on each timepoint and load zone.
# #This process is extremely slow, need to make it efficient
# print "dispatch_proj_by_tech_lz_tp.csv..."
# export.write_table(
# instance, instance.TIMEPOINTS, instance.LOAD_ZONES,
# output_file=os.path.join(summaries_dir, "dispatch_proj_by_tech_lz_tp.csv"),
# headings=("load zone", "timepoint",) + tuple(g for g in instance.GENERATION_TECHNOLOGIES),
# values=lambda m, tp, lz: (lz, m.tp_timestamp[tp],) + tuple(
# sum(m.DispatchProj[proj, t] for (proj, t) in m.PROJ_DISPATCH_POINTS
# if m.proj_gen_tech[proj] == g and t == tp and m.proj_load_zone[proj] == lz)
# for g in m.GENERATION_TECHNOLOGIES)
# )
print "Time taken writing summaries: {dur:.2f}s".format(dur=time.time()-start)
| apache-2.0 |
hfutsuchao/Python2.6 | stocks/option_data_change_backup.py | 1 | 1057 | #!/usr/bin/python
#coding:utf-8
import sys
import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import numpy as np
DB_CONNECT_STRING = 'sqlite:///stock_option_data.db'
engine = create_engine(DB_CONNECT_STRING,echo=False)
DB_CONNECT_STRING2 = 'sqlite:///stock_option_data2.db'
engine2 = create_engine(DB_CONNECT_STRING2,echo=False)
data = pd.read_sql('select * from option_greeks;',engine)
for c in data.columns:
try:
data[c] = data[c].astype('float64')
except Exception,e:
print e
data = data.drop_duplicates()
data = data.set_index('17Strike')
data.to_sql('option_greeks',engine2,if_exists='append')
#price
#data = pd.read_sql('select * from option_price where date<"2017-06-01";',engine)
data = pd.read_sql('select * from option_price;',engine)
for c in data.columns:
#data[c][data[c]==''] = np.nan
try:
data[c] = data[c].astype('float64')
except Exception,e:
print e
data = data.drop_duplicates()
data = data.set_index('17Strike')
data.to_sql('option_price',engine2,if_exists='append')
| gpl-2.0 |
nvoron23/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
rootpy/rootpy | rootpy/plotting/style/atlas/style_mpl.py | 2 | 1477 | """
ATLAS-like style for matplotlib
"""
__all__ = [
'style_mpl',
]
def style_mpl():
STYLE = {}
STYLE['lines.linewidth'] = 1
# font
STYLE['font.family'] = 'sans-serif'
STYLE['mathtext.fontset'] = 'stixsans'
STYLE['mathtext.default'] = 'rm'
# helvetica usually not present on linux
STYLE['font.sans-serif'] = 'helvetica, Helvetica, Nimbus Sans L, Mukti Narrow, FreeSans'
# figure layout
STYLE['figure.figsize'] = 8.75, 5.92
# atlasStyle->SetPaperSize(20,26); # in cm
# STYLE['figure.figsize'] = 10.2362205, 7.874015 # in inc, not working
STYLE['figure.facecolor'] = 'white'
STYLE['figure.subplot.bottom'] = 0.16
STYLE['figure.subplot.top'] = 0.95
STYLE['figure.subplot.left'] = 0.16
STYLE['figure.subplot.right'] = 0.95
# axes
STYLE['axes.labelsize'] = 20
STYLE['xtick.labelsize'] = 19
STYLE['xtick.major.size'] = 12
STYLE['xtick.minor.size'] = 6
STYLE['ytick.labelsize'] = 19
STYLE['ytick.major.size'] = 14
STYLE['ytick.minor.size'] = 7
STYLE['lines.markersize'] = 8
# STYLE['lines.markeredgewidth'] = 0. # not working, it changes other stuff
# legend
STYLE['legend.numpoints'] = 1
STYLE['legend.fontsize'] = 19
STYLE['legend.labelspacing'] = 0.3
STYLE['legend.frameon'] = False
# what cannot be set with rcParams:
# * markeredgewidth
# * axis-label alignment
# * axis-label offset
# * axis-ticks
return STYLE
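# Illustrative usage sketch (not part of the original module): the dict returned
# by style_mpl() maps onto matplotlib rcParams keys, so it can be applied
# globally before plotting; matplotlib is assumed to be installed.
if __name__ == '__main__':
    import matplotlib
    matplotlib.rcParams.update(style_mpl())
    import matplotlib.pyplot as plt
    plt.plot([0, 1, 2], [0, 1, 4], label='example')
    plt.legend()
    plt.show()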
| bsd-3-clause |
reinvantveer/Topology-Learning | model/building_convnet.py | 1 | 6991 | """
This script executes the task of estimating the building type, based solely on the geometry for that building.
The data for this script can be found at http://hdl.handle.net/10411/GYPPBR.
"""
import os
import socket
import sys
from datetime import datetime, timedelta
from pathlib import Path
from time import time
from urllib.request import urlretrieve
import numpy as np
from keras import Input
from keras.callbacks import TensorBoard
from keras.engine import Model
from keras.layers import Dense, Conv1D, MaxPooling1D, GlobalAveragePooling1D, Dropout
from keras.optimizers import Adam
from keras.preprocessing.sequence import pad_sequences
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
PACKAGE_PARENT = '..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
from prep.ProgressBar import ProgressBar
from topoml_util import geom_scaler
from topoml_util.slack_send import notify
SCRIPT_VERSION = '2.0.3'
SCRIPT_NAME = os.path.basename(__file__)
TIMESTAMP = str(datetime.now()).replace(':', '.')
SIGNATURE = SCRIPT_NAME + ' ' + SCRIPT_VERSION + ' ' + TIMESTAMP
DATA_FOLDER = '../files/buildings/'
TRAIN_DATA_FILE = 'buildings_train_v7.npz'
TEST_DATA_FILE = 'buildings_test_v7.npz'
TRAIN_DATA_URL = 'https://dataverse.nl/api/access/datafile/11381'
TEST_DATA_URL = 'https://dataverse.nl/api/access/datafile/11380'
SCRIPT_START = time()
# Hyperparameters
hp = {
'BATCH_SIZE': int(os.getenv('BATCH_SIZE', 32)),
'TRAIN_VALIDATE_SPLIT': float(os.getenv('TRAIN_VALIDATE_SPLIT', 0.1)),
'REPEAT_DEEP_ARCH': int(os.getenv('REPEAT_DEEP_ARCH', 0)),
'DENSE_SIZE': int(os.getenv('DENSE_SIZE', 32)),
'EPOCHS': int(os.getenv('EPOCHS', 200)),
'LEARNING_RATE': float(os.getenv('LEARNING_RATE', 1e-4)),
'DROPOUT': float(os.getenv('DROPOUT', 0.0)),
'GEOM_SCALE': float(os.getenv("GEOM_SCALE", 0)), # If no default or 0: overridden when data is known
}
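# Hypothetical invocation sketch (not part of the original script): each
# hyperparameter above is read via os.getenv, so it can be overridden from the
# shell before launching, e.g.
#
#     BATCH_SIZE=64 EPOCHS=50 DROPOUT=0.1 python building_convnet.py
#
# Final-test mode is requested with the -t/--test flag handled further down.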
OPTIMIZER = Adam(lr=hp['LEARNING_RATE'])
# Load training data
path = Path(DATA_FOLDER + TRAIN_DATA_FILE)
if not path.exists():
print("Retrieving training data from web...")
urlretrieve(TRAIN_DATA_URL, DATA_FOLDER + TRAIN_DATA_FILE)
train_loaded = np.load(DATA_FOLDER + TRAIN_DATA_FILE)
train_geoms = train_loaded['geoms']
train_labels = train_loaded['building_type']
# Determine final test mode or standard
if len(sys.argv) > 1 and sys.argv[1] in ['-t', '--test']:
print('Training in final test mode')
path = Path(DATA_FOLDER + TEST_DATA_FILE)
if not path.exists():
print("Retrieving test data from web...")
urlretrieve(TEST_DATA_URL, DATA_FOLDER + TEST_DATA_FILE)
test_loaded = np.load(DATA_FOLDER + TEST_DATA_FILE)
test_geoms = test_loaded['geoms']
test_labels = test_loaded['building_type']
else:
print('Training in standard training mode')
# Split the training data in random seen/unseen sets
train_geoms, test_geoms, train_labels, test_labels = train_test_split(train_geoms, train_labels, test_size=0.1)
# Normalize
geom_scale = hp['GEOM_SCALE'] or geom_scaler.scale(train_geoms)
train_geoms = geom_scaler.transform(train_geoms, geom_scale)
test_geoms = geom_scaler.transform(test_geoms, geom_scale) # re-use variance from training
# Sort data according to sequence length
zipped = zip(train_geoms, train_labels)
train_input_sorted = {}
train_labels_sorted = {}
train_labels__max = np.array(train_labels).max()
for geom, label in sorted(zipped, key=lambda x: len(x[0]), reverse=True):
# Map types to one-hot vectors
# noinspection PyUnresolvedReferences
one_hot_label = np.zeros((train_labels__max + 1))
one_hot_label[label] = 1
sequence_len = geom.shape[0]
smallest_size_subset = sorted(train_input_sorted.keys())[0] if train_input_sorted else None
if not smallest_size_subset: # This is the first data point
train_input_sorted[sequence_len] = [geom]
train_labels_sorted[sequence_len] = [one_hot_label]
continue
if sequence_len in train_input_sorted: # the entry exists, append
train_input_sorted[sequence_len].append(geom)
train_labels_sorted[sequence_len].append(one_hot_label)
continue
# the size subset does not exist yet
# append the data to the smallest size subset if it isn't batch-sized yet
if len(train_input_sorted[smallest_size_subset]) < hp['BATCH_SIZE']:
geom = pad_sequences([geom], smallest_size_subset)[0] # make it the same size as the rest in the subset
train_input_sorted[smallest_size_subset].append(geom)
train_labels_sorted[smallest_size_subset].append(one_hot_label)
else:
train_input_sorted[sequence_len] = [geom]
train_labels_sorted[sequence_len] = [one_hot_label]
# Shape determination
geom_vector_len = train_geoms[0].shape[1]
output_size = train_labels__max + 1
# Build model
inputs = Input(shape=(None, geom_vector_len))
model = Conv1D(32, (5,), activation='relu', padding='SAME')(inputs)
# model = Conv1D(32, (5,), activation='relu', padding='SAME')(model)
model = MaxPooling1D(3)(model)
model = Conv1D(64, (5,), activation='relu', padding='SAME')(model)
model = GlobalAveragePooling1D()(model)
model = Dense(hp['DENSE_SIZE'], activation='relu')(model)
model = Dropout(hp['DROPOUT'])(model)
model = Dense(output_size, activation='softmax')(model)
model = Model(inputs=inputs, outputs=model)
model.compile(
loss='categorical_crossentropy',
metrics=['accuracy'],
optimizer=OPTIMIZER),
model.summary()
# Callbacks
callbacks = [TensorBoard(log_dir='./tensorboard_log/' + SIGNATURE, write_graph=False)]
pgb = ProgressBar()
for epoch in range(hp['EPOCHS']):
for sequence_len in sorted(train_input_sorted.keys()):
message = 'Epoch {} of {}, sequence length {}'.format(epoch + 1, hp['EPOCHS'], sequence_len)
pgb.update_progress(epoch/hp['EPOCHS'], message)
inputs = np.array(train_input_sorted[sequence_len])
labels = np.array(train_labels_sorted[sequence_len])
model.fit(
x=inputs,
y=labels,
verbose=0,
epochs=epoch + 1,
initial_epoch=epoch,
batch_size=hp['BATCH_SIZE'],
validation_split=hp['TRAIN_VALIDATE_SPLIT'],
callbacks=callbacks)
# Run on unseen test data
print('\n\nRun on test data...')
test_preds = [model.predict(np.array([test])) for test in test_geoms]
test_preds = [np.argmax(pred) for pred in test_preds]
accuracy = accuracy_score(test_labels, test_preds)
runtime = time() - SCRIPT_START
message = 'on {} completed with accuracy of \n{:f} \nin {} in {} epochs\n'.format(
socket.gethostname(), accuracy, timedelta(seconds=runtime), hp['EPOCHS'])
for key, value in sorted(hp.items()):
message += '{}: {}\t'.format(key, value)
notify(SIGNATURE, message)
print(SCRIPT_NAME, 'finished successfully with', message)
| mit |
gautam1858/tensorflow | tensorflow/contrib/factorization/python/ops/gmm_test.py | 41 | 8716 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ops.gmm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.factorization.python.ops import gmm as gmm_lib
from tensorflow.contrib.learn.python.learn.estimators import kmeans
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed as random_seed_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.training import queue_runner
class GMMTest(test.TestCase):
def input_fn(self, batch_size=None, points=None):
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return x, None
indices = random_ops.random_uniform(constant_op.constant([batch_size]),
minval=0, maxval=num_points-1,
dtype=dtypes.int32,
seed=10)
return array_ops.gather(x, indices), None
return _fn
def setUp(self):
np.random.seed(3)
random_seed_lib.set_random_seed(2)
self.num_centers = 2
self.num_dims = 2
self.num_points = 4000
self.batch_size = self.num_points
self.true_centers = self.make_random_centers(self.num_centers,
self.num_dims)
self.points, self.assignments = self.make_random_points(
self.true_centers, self.num_points)
# Use initial means from kmeans (just like scikit-learn does).
clusterer = kmeans.KMeansClustering(num_clusters=self.num_centers)
clusterer.fit(input_fn=lambda: (constant_op.constant(self.points), None),
steps=30)
self.initial_means = clusterer.clusters()
@staticmethod
def make_random_centers(num_centers, num_dims):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * 500)
@staticmethod
def make_random_points(centers, num_points):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * 20)
points = centers[assignments] + offsets
return (points, assignments)
def test_weights(self):
"""Tests the shape of the weights."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
weights = gmm.weights()
self.assertAllEqual(list(weights.shape), [self.num_centers])
def test_clusters(self):
"""Tests the shape of the clusters."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
clusters = gmm.clusters()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters='random',
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=1)
score1 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
gmm.fit(input_fn=self.input_fn(), steps=10)
score2 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
self.assertLess(score1, score2)
def test_infer(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=60)
clusters = gmm.clusters()
# Make a small test set
num_points = 40
points, true_assignments = self.make_random_points(clusters, num_points)
assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=num_points)):
assignments.append(item)
assignments = np.ravel(assignments)
self.assertAllEqual(true_assignments, assignments)
def _compare_with_sklearn(self, cov_type):
# sklearn version.
iterations = 40
np.random.seed(5)
sklearn_assignments = np.asarray([0, 0, 1, 0, 0, 0, 1, 0, 0, 1])
sklearn_means = np.asarray([[144.83417719, 254.20130341],
[274.38754816, 353.16074346]])
sklearn_covs = np.asarray([[[395.0081194, -4.50389512],
[-4.50389512, 408.27543989]],
[[385.17484203, -31.27834935],
[-31.27834935, 391.74249925]]])
# skflow version.
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
covariance_type=cov_type,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=iterations)
points = self.points[:10, :]
skflow_assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=10)):
skflow_assignments.append(item)
self.assertAllClose(sklearn_assignments,
np.ravel(skflow_assignments).astype(int))
self.assertAllClose(sklearn_means, gmm.clusters())
if cov_type == 'full':
self.assertAllClose(sklearn_covs, gmm.covariances(), rtol=0.01)
else:
for d in [0, 1]:
self.assertAllClose(
np.diag(sklearn_covs[d]), gmm.covariances()[d, :], rtol=0.01)
def test_compare_full(self):
self._compare_with_sklearn('full')
def test_compare_diag(self):
self._compare_with_sklearn('diag')
def test_random_input_large(self):
# sklearn version.
iterations = 5 # that should be enough to know whether this diverges
np.random.seed(5)
num_classes = 20
x = np.array([[np.random.random() for _ in range(100)]
for _ in range(num_classes)], dtype=np.float32)
# skflow version.
gmm = gmm_lib.GMM(num_classes,
covariance_type='full',
config=run_config.RunConfig(tf_random_seed=2))
def get_input_fn(x):
def input_fn():
return constant_op.constant(x.astype(np.float32)), None
return input_fn
gmm.fit(input_fn=get_input_fn(x), steps=iterations)
self.assertFalse(np.isnan(gmm.clusters()).any())
class GMMTestQueues(test.TestCase):
def input_fn(self):
def _fn():
queue = data_flow_ops.FIFOQueue(capacity=10,
dtypes=dtypes.float32,
shapes=[10, 3])
enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
queue_runner.add_queue_runner(queue_runner.QueueRunner(queue,
[enqueue_op]))
return queue.dequeue(), None
return _fn
# This test makes sure that there are no deadlocks when using a QueueRunner.
# Note that since cluster initialization is dependent on inputs, if input
# is generated using a QueueRunner, one has to make sure that these runners
# are started before the initialization.
def test_queues(self):
gmm = gmm_lib.GMM(2, covariance_type='diag')
gmm.fit(input_fn=self.input_fn(), steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
bsipocz/statsmodels | statsmodels/examples/example_kde.py | 34 | 1066 |
from __future__ import print_function
import numpy as np
from scipy import stats
from statsmodels.distributions.mixture_rvs import mixture_rvs
from statsmodels.nonparametric.kde import kdensityfft
import matplotlib.pyplot as plt
np.random.seed(12345)
obs_dist = mixture_rvs([.25,.75], size=10000, dist=[stats.norm, stats.norm],
kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
#.. obs_dist = mixture_rvs([.25,.75], size=10000, dist=[stats.norm, stats.beta],
#.. kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=1,args=(1,.5))))
f_hat, grid, bw = kdensityfft(obs_dist, kernel="gauss", bw="scott")
# Check the plot
plt.figure()
plt.hist(obs_dist, bins=50, normed=True, color='red')
plt.plot(grid, f_hat, lw=2, color='black')
plt.show()
# do some timings
# get bw first because they're not streamlined
from statsmodels.nonparametric import bandwidths
bw = bandwidths.bw_scott(obs_dist)
#.. timeit kdensity(obs_dist, kernel="gauss", bw=bw, gridsize=2**10)
#.. timeit kdensityfft(obs_dist, kernel="gauss", bw=bw, gridsize=2**10)
| bsd-3-clause |
CamDavidsonPilon/StartupFiles | startup/00-imports.py | 1 | 1193 |
# Add non-system directories to sys.path
# Uses a list of top level directories of any and all of your repos.
# Loop below will recurse through the path, ignoring .git
# sub-directories and not diving into packages.
# edit as needed...
# PYTHON_MODULES_DIR_LIST = [os.path.join(os.path.expandvars('$HOME'),'DIRECTORY_NAME')]
PYTHON_MODULES_DIR_LIST = []
import os
import sys
for python_module_dir in PYTHON_MODULES_DIR_LIST:
    print 'Recursively adding %s to the python path...' % python_module_dir
sys.path.append(python_module_dir)
for root, dirs, files in os.walk(python_module_dir):
for d in dirs:
if '.git' in root or '.git' in d: # ignore the .git directory
pass
# add non-package directories to the path
elif not os.path.exists(os.path.join(root, d, '__init__.py')):
fulldir = os.path.join(root,d)
print '---> Adding %s to the python path...' % fulldir
sys.path.append(fulldir)
# Data analysis imports
import numpy as np
import pandas as pd
# IO imports
import json
#IPython references
from IPython.core.magic import register_line_magic
_ip = get_ipython()
| mit |
meduz/scikit-learn | examples/classification/plot_digits_classification.py | 34 | 2409 | """
================================
Recognizing hand-written digits
================================
An example showing how scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 4 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# matplotlib.pyplot.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples // 2:]
predicted = classifier.predict(data[n_samples // 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples // 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
| bsd-3-clause |
michaelchu/kaleidoscope | kaleidoscope/options/option_strategies.py | 1 | 13792 | from datetime import timedelta
import pandas as pd
from kaleidoscope.globals import OptionType, Period
from kaleidoscope.options.option_query import OptionQuery
from kaleidoscope.options.option_strategy import OptionStrategy
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
class OptionStrategies(object):
"""
Static methods to define option strategies
"""
@staticmethod
def single(chain, **params):
if 'option_type' not in params:
raise ValueError("Must provide option_type for single option")
chains = OptionQuery(chain).option_type(params['option_type'])
chains = chains.lte('expiration', params['DTE']).fetch() if 'DTE' in params else chains.fetch()
original_chains = chains
chains['symbol'] = '.' + chains['symbol']
chains['mark'] = (chains['bid'] + chains['ask']) / 2
chains['volume'] = chains['trade_volume']
new_col = ['symbol', 'underlying_symbol', 'quote_date', 'expiration', 'volume', 'mark']
        for greek in ['delta', 'theta', 'gamma', 'vega', 'rho']:
            # single options carry their greeks through unchanged
            if greek in chains.columns:
                new_col.append(greek)
return OptionStrategy(chains[new_col], original_chains, "Single")
@staticmethod
def vertical(chain, **params):
"""
The vertical spread is an option spread strategy whereby the
option trader purchases a certain number of options and simultaneously
sell an equal number of options of the same class, same underlying security,
same expiration date, but at a different strike price.
:param chain: Filtered Dataframe to vertical spreads with.
:param width: Distance in value between the strikes to construct vertical spreads with.
:param option_type: The option type for this spread
:return: A new dataframe containing all vertical spreads created from dataframe
"""
if 'option_type' not in params or 'width' not in params:
raise ValueError("Must provide option_type and width parameters for vertical spreads")
elif params['width'] <= 0:
raise ValueError("Width of vertical spreads cannot be less than or equal 0")
elif not isinstance(chain, OptionQuery):
raise ValueError("Parameter 'chain' must be of OptionQuery type")
chains = chain.option_type(params['option_type'])
chains = chains.lte('expiration', params['DTE']).fetch() if 'DTE' in params else chains.fetch()
original_chains = chains
# shift only the strikes since this is a vertical spread
chains['strike_key'] = chains['strike'] + (params['width'] * params['option_type'].value[1])
left_keys = ['quote_date', 'expiration', 'root', 'option_type', 'strike_key']
right_keys = ['quote_date', 'expiration', 'root', 'option_type', 'strike']
chains = chains.merge(chains, left_on=left_keys, right_on=right_keys, suffixes=('', '_shifted'))
chains['symbol'] = '.' + chains['symbol'] + '-.' + chains['symbol_shifted']
chains['mark'] = ((chains['bid'] - chains['ask_shifted']) + (chains['ask'] - chains['bid_shifted'])) / 2
chains['volume'] = chains['trade_volume'] + chains['trade_volume_shifted']
new_col = ['symbol', 'underlying_symbol', 'quote_date', 'expiration', 'volume', 'mark']
for greek in ['delta', 'theta', 'gamma', 'vega', 'rho']:
if greek in chains.columns:
chains[greek] = chains[greek] - chains[greek + "_shifted"]
new_col.append(greek)
return OptionStrategy(chains[new_col], original_chains, "Vertical")
@staticmethod
def iron_condor(chain, **params):
"""
The iron condor is an option trading strategy utilizing two vertical spreads
– a put spread and a call spread with the same expiration and four different strikes.
A long iron condor is essentially selling both sides of the underlying instrument by
simultaneously shorting the same number of calls and puts, then covering each position
with the purchase of further out of the money call(s) and put(s) respectively.
The converse produces a short iron condor.
:param chain: Filtered Dataframe to vertical spreads with.
:param width: Width between the middle strikes.
:param c_width: Width of the call spreads
:param p_width: Width of the put spreads
:return: A new dataframe containing all iron condors created from dataframe
"""
if 'c_width' not in params or 'p_width' not in params or 'width' not in params:
raise ValueError("Must define all widths for iron condor")
elif params['width'] <= 0 or params['c_width'] <= 0 or params['p_width'] <= 0:
raise ValueError("Widths cannot be less than or equal 0")
elif not isinstance(chain, OptionQuery):
raise ValueError("Parameter 'chain' must be of OptionQuery type")
chains = chain.lte('expiration', params['DTE']) if 'DTE' in params else chain
original_chains = chains
call_chains = chains.calls().fetch()
put_chains = chains.puts().fetch()
# shift only the strikes since this is a vertical spread
call_chains['strike_key'] = call_chains['strike'] + (params['c_width'] * OptionType.CALL.value[1])
put_chains['strike_key'] = put_chains['strike'] + (params['p_width'] * OptionType.PUT.value[1])
left_keys = ['quote_date', 'expiration', 'root', 'option_type', 'strike_key']
right_keys = ['quote_date', 'expiration', 'root', 'option_type', 'strike']
# CALL SIDE ===================================================================================================
call_side = call_chains.merge(call_chains, left_on=left_keys, right_on=right_keys, suffixes=('', '_shifted'))
call_side['symbol'] = '.' + call_side['symbol'] + '-.' + call_side['symbol_shifted']
call_side['mark'] = ((call_side['bid'] - call_side['ask_shifted']) +
(call_side['ask'] - call_side['bid_shifted'])) / 2
call_side['volume'] = call_side['trade_volume'] + call_side['trade_volume_shifted']
# PUT SIDE ====================================================================================================
put_side = put_chains.merge(put_chains, left_on=left_keys, right_on=right_keys, suffixes=('', '_shifted'))
put_side['symbol'] = '.' + put_side['symbol'] + '-.' + put_side['symbol_shifted']
put_side['mark'] = ((put_side['bid'] - put_side['ask_shifted']) +
(put_side['ask'] - put_side['bid_shifted'])) / 2
put_side['volume'] = put_side['trade_volume'] + put_side['trade_volume_shifted']
put_side['strike_key'] = put_side['strike'] + params['width']
# MERGED ======================================================================================================
call_side_keys = ['quote_date', 'underlying_symbol', 'expiration', 'root', 'strike']
put_side_keys = ['quote_date', 'underlying_symbol', 'expiration', 'root', 'strike_key']
chains = call_side.merge(put_side, left_on=call_side_keys, right_on=put_side_keys, suffixes=('_c', '_p'))
chains['symbol'] = chains['symbol_c'] + '+' + chains['symbol_p']
chains['mark'] = chains['mark_c'] + chains['mark_p']
        chains['volume'] = chains['volume_c'] + chains['volume_p']  # total traded volume across all four legs
new_col = ['symbol', 'underlying_symbol', 'quote_date', 'expiration', 'volume', 'mark']
        for greek in ['delta', 'theta', 'gamma', 'vega', 'rho']:
            # after the final merge the per-side greeks carry '_c'/'_p' suffixes;
            # combine the call and put spreads (near leg minus far leg on each side)
            if greek + '_c' in chains.columns:
                chains[greek] = (chains[greek + '_c'] - chains[greek + '_shifted_c']) + \
                                (chains[greek + '_p'] - chains[greek + '_shifted_p'])
                new_col.append(greek)
return OptionStrategy(chains[new_col], original_chains, "Iron Condor")
@staticmethod
def covered_stock(chain, **params):
"""
A covered call is an options strategy whereby an investor holds a long position
        in an asset and writes (sells) call options on that same asset in an attempt to
generate increased income from the asset.
Writing covered puts is a bearish options trading strategy involving the
writing of put options while shorting the obligated shares of the underlying stock.
        :param chain: Filtered DataFrame to construct covered stock positions with.
        :param option_type: The option type for this spread
        :return: A new dataframe containing all covered stock positions created from dataframe
"""
if 'option_type' not in params:
raise ValueError("Must provide option_type for covered stock")
# set the attributes for this option strategy
OptionStrategies.covered_stock.option_config = {'stock': 100, 'option': 1}
out_col = OptionStrategies.base_out_col
chains = OptionQuery(chain).option_type(params['option_type'])
chains = chains.lte('expiration', params['DTE']).fetch() if 'DTE' in params else chains.fetch()
side = -1 * params['option_type'].value[1]
chains['spread_mark'] = (side * (chains['bid'] + chains['ask']) / 2) + chains['underlying_price']
prefix = "-." if params['option_type'] == OptionType.CALL else "."
chains['spread_symbol'] = prefix + chains['symbol'] + "+100*" + chains['underlying_symbol']
return chains[out_col + ['strike']]
@staticmethod
def diagonal(chain, **params):
pass
@staticmethod
def double_diagonal(chain, **params):
pass
@staticmethod
def calendar(chain, **params):
"""
A calendar spread is a strategy involving buying longer term options and selling
equal number of shorter term options of the same underlying stock or index with the
same strike price. Calendar spreads can be done with calls or with puts,
which are virtually equivalent if using same strikes and expirations.
They can use ATM (At The Money) strikes which make the trade neutral.
If using OTM (Out Of The Money) or ITM (In The Money) strikes,
the trade becomes directionally biased.
        :param chain: Filtered DataFrame to construct calendar spreads with.
        :param option_type: The option type for this spread
        :param depth: The period to represent the difference between the expiration dates of the two options
        :return: A new dataframe containing all calendar spreads created from dataframe
"""
if 'option_type' not in params:
raise ValueError("Must provide option_type for calendar spread")
elif 'depth' not in params:
raise ValueError("Must provide period depth for calender spread")
# set the attributes for this option strategy
OptionStrategies.calendar.option_config = {'option': 2}
out_col = OptionStrategies.base_out_col
shift = Period.ONE_WEEK if 'depth' not in params else params['depth']
chains = OptionQuery(chain).option_type(params['option_type'])
chains = chains.lte('expiration', params['DTE']).fetch() if 'DTE' in params else chains.fetch()
# create column with expiration shifted by depth
chains['expiration_key'] = chains['expiration'] + timedelta(days=shift.value)
left_keys = ['quote_date', 'expiration_key', 'option_type', 'strike']
right_keys = ['quote_date', 'expiration', 'option_type', 'strike']
chains = chains.merge(chains, left_on=left_keys, right_on=right_keys, suffixes=('', '_shifted'))
if chains.empty:
raise ValueError("Cannot construct calendar spreads. Check expirations exists for specified depth.")
# calculate the spread's bid and ask prices
for c, f in OptionStrategies.shift_col:
# handle bid ask special case
if c == 'bid':
chains['spread_' + c] = f(chains[c], chains['ask_shifted'])
elif c == 'ask':
chains['spread_' + c] = f(chains[c], chains['bid_shifted'])
else:
if f is not None:
chains['spread_' + c] = f(chains[c], chains[c + '_shifted'])
chains['spread_mark'] = (chains['spread_bid'] + chains['spread_ask']) / 2
chains['spread_symbol'] = "." + chains['symbol_shifted'] + "-." + chains['symbol']
# assign the strategy name to this dataframe's name attribute
        chains.name = OptionStrategies.calendar.__name__
return chains[out_col + ['strike', 'expiration_shifted']]
@staticmethod
def straddle(chain, **params):
pass
@staticmethod
def strangle(chain, **params):
pass
@staticmethod
def combo(chain, **params):
pass
@staticmethod
def back_ratio(chain, **params):
pass
@staticmethod
def butterfly(chain, **params):
pass
@staticmethod
def condor(chain, **params):
pass
@staticmethod
def custom(chain, **params):
pass
def construct(symbol, strategy, chains, **kwargs):
"""
This is a convenience method to allow for creation of option spreads
from predefined sources.
:param symbol: The symbol of the option chains
:param strategy: The option strategy filter to use
:param chains: Option chains data to use. This data should come from data.get() method
:param kwargs: Parameters used to construct the spreads
:return:
"""
# wrap dataframes into OptionSeries object to be used in backtest
spread_chains = strategy(chains, **kwargs)
return OptionSeries(symbol, strategy.__name__, spread_chains, **kwargs)
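# Illustrative usage sketch (the symbol, width and the chains variable are
# hypothetical, not taken from this module); note that construct() above relies
# on an OptionSeries class that would need to be imported from elsewhere in the
# kaleidoscope package:
#
#     spreads = construct('VXX', OptionStrategies.vertical, OptionQuery(chains),
#                         option_type=OptionType.CALL, width=2)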
| mit |
Azure/azure-sdk-for-python | sdk/monitor/azure-monitor-query/samples/async_samples/sample_log_query_client_async.py | 1 | 2903 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import asyncio
import os
import pandas as pd
from azure.monitor.query.aio import LogsQueryClient
from azure.identity.aio import DefaultAzureCredential
async def logs_query():
credential = DefaultAzureCredential(
client_id = os.environ['AZURE_CLIENT_ID'],
client_secret = os.environ['AZURE_CLIENT_SECRET'],
tenant_id = os.environ['AZURE_TENANT_ID']
)
client = LogsQueryClient(credential)
# Response time trend
# request duration over the last 12 hours.
query = """AppRequests |
where TimeGenerated > ago(12h) |
summarize avgRequestDuration=avg(DurationMs) by bin(TimeGenerated, 10m), _ResourceId"""
# returns LogsQueryResults
async with client:
response = await client.query(os.environ['LOG_WORKSPACE_ID'], query)
if not response.tables:
print("No results for the query")
for table in response.tables:
df = pd.DataFrame(table.rows, columns=[col.name for col in table.columns])
print(df)
"""
TimeGenerated _ResourceId avgRequestDuration
0 2021-05-27T08:40:00Z /subscriptions/faa080af-c1d8-40ad-9cce-e1a450c... 27.307699999999997
1 2021-05-27T08:50:00Z /subscriptions/faa080af-c1d8-40ad-9cce-e1a450c... 18.11655
2 2021-05-27T09:00:00Z /subscriptions/faa080af-c1d8-40ad-9cce-e1a450c... 24.5271
"""
# if you dont want to use pandas - here's how you can process it.
#response.tables is a LogsQueryResultTable
for table in response.tables:
for col in table.columns: #LogsQueryResultColumn
print(col.name + "/"+ col.type + " | ", end="")
print("\n")
for row in table.rows:
for item in row:
print(item + " | ", end="")
print("\n")
"""
TimeGenerated/datetime | _ResourceId/string | avgRequestDuration/real |
2021-05-11T08:20:00Z | /subscriptions/<subscription id>/resourcegroups/cobey-azuresdkshinydashboardgrp/providers/microsoft.insights/components/cobey-willthisbestatic | 10.8915 |
2021-05-11T08:30:00Z | /subscriptions/<subscription id>/resourcegroups/cobey-azuresdkshinydashboardgrp/providers/microsoft.insights/components/cobey-willthisbestatic | 33.23276666666667 |
2021-05-11T08:40:00Z | /subscriptions/<subscription id>/resourcegroups/cobey-azuresdkshinydashboardgrp/providers/microsoft.insights/components/cobey-willthisbestatic | 21.83535 |
2021-05-11T08:50:00Z | /subscriptions/<subscription id>/resourcegroups/cobey-azuresdkshinydashboardgrp/providers/microsoft.insights/components/cobey-willthisbestatic | 11.028649999999999 |
"""
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(logs_query()) | mit |
buqing2009/MissionPlanner | Lib/site-packages/scipy/signal/ltisys.py | 53 | 23848 | """
ltisys -- a collection of classes and functions for modeling linear
time invariant systems.
"""
#
# Author: Travis Oliphant 2001
#
# Feb 2010: Warren Weckesser
# Rewrote lsim2 and added impulse2.
#
from filter_design import tf2zpk, zpk2tf, normalize
import numpy
from numpy import product, zeros, array, dot, transpose, ones, \
nan_to_num, zeros_like, linspace
import scipy.interpolate as interpolate  # required by lsim2 when an input U is supplied
import scipy.integrate as integrate
import scipy.linalg as linalg
from numpy import r_, eye, real, atleast_1d, atleast_2d, poly, \
squeeze, diag, asarray
def tf2ss(num, den):
"""Transfer function to state-space representation.
Parameters
----------
num, den : array_like
Sequences representing the numerator and denominator
polynomials.
Returns
-------
A, B, C, D : ndarray
State space representation of the system.
"""
# Controller canonical state-space representation.
# if M+1 = len(num) and K+1 = len(den) then we must have M <= K
# states are found by asserting that X(s) = U(s) / D(s)
# then Y(s) = N(s) * X(s)
#
# A, B, C, and D follow quite naturally.
#
num, den = normalize(num, den) # Strips zeros, checks arrays
nn = len(num.shape)
if nn == 1:
num = asarray([num], num.dtype)
M = num.shape[1]
K = len(den)
if (M > K):
raise ValueError("Improper transfer function.")
if (M == 0 or K == 0): # Null system
return array([],float), array([], float), array([], float), \
array([], float)
# pad numerator to have same number of columns has denominator
num = r_['-1',zeros((num.shape[0],K-M), num.dtype), num]
if num.shape[-1] > 0:
D = num[:,0]
else:
D = array([],float)
if K == 1:
return array([], float), array([], float), array([], float), D
frow = -array([den[1:]])
A = r_[frow, eye(K-2, K-1)]
B = eye(K-1, 1)
C = num[:,1:] - num[:,0] * den[1:]
return A, B, C, D
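# Illustrative example (not part of the original docstring): the second-order
# transfer function H(s) = 1 / (s**2 + 2*s + 1) converts to controller-canonical
# state-space form with
#
#     A, B, C, D = tf2ss([1.0], [1.0, 2.0, 1.0])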
def _none_to_empty(arg):
if arg is None:
return []
else:
return arg
def abcd_normalize(A=None, B=None, C=None, D=None):
"""Check state-space matrices and ensure they are rank-2.
"""
A, B, C, D = map(_none_to_empty, (A, B, C, D))
A, B, C, D = map(atleast_2d, (A, B, C, D))
if ((len(A.shape) > 2) or (len(B.shape) > 2) or \
(len(C.shape) > 2) or (len(D.shape) > 2)):
raise ValueError("A, B, C, D arrays can be no larger than rank-2.")
MA, NA = A.shape
MB, NB = B.shape
MC, NC = C.shape
MD, ND = D.shape
if (MC == 0) and (NC == 0) and (MD != 0) and (NA != 0):
MC, NC = MD, NA
C = zeros((MC, NC))
if (MB == 0) and (NB == 0) and (MA != 0) and (ND != 0):
MB, NB = MA, ND
        B = zeros((MB, NB))
if (MD == 0) and (ND == 0) and (MC != 0) and (NB != 0):
MD, ND = MC, NB
        D = zeros((MD, ND))
if (MA == 0) and (NA == 0) and (MB != 0) and (NC != 0):
MA, NA = MB, NC
        A = zeros((MA, NA))
if MA != NA:
raise ValueError("A must be square.")
if MA != MB:
raise ValueError("A and B must have the same number of rows.")
if NA != NC:
raise ValueError("A and C must have the same number of columns.")
if MD != MC:
raise ValueError("C and D must have the same number of rows.")
if ND != NB:
raise ValueError("B and D must have the same number of columns.")
return A, B, C, D
def ss2tf(A, B, C, D, input=0):
"""State-space to transfer function.
Parameters
----------
A, B, C, D : ndarray
State-space representation of linear system.
input : int, optional
For multiple-input systems, the input to use.
Returns
-------
num, den : 1D ndarray
Numerator and denominator polynomials (as sequences)
respectively.
"""
# transfer function is C (sI - A)**(-1) B + D
A, B, C, D = map(asarray, (A, B, C, D))
# Check consistency and
# make them all rank-2 arrays
A, B, C, D = abcd_normalize(A, B, C, D)
nout, nin = D.shape
if input >= nin:
raise ValueError("System does not have the input specified.")
# make MOSI from possibly MOMI system.
if B.shape[-1] != 0:
B = B[:,input]
B.shape = (B.shape[0],1)
if D.shape[-1] != 0:
D = D[:,input]
try:
den = poly(A)
except ValueError:
den = 1
if (product(B.shape,axis=0) == 0) and (product(C.shape,axis=0) == 0):
num = numpy.ravel(D)
if (product(D.shape,axis=0) == 0) and (product(A.shape,axis=0) == 0):
den = []
return num, den
num_states = A.shape[0]
type_test = A[:,0] + B[:,0] + C[0,:] + D
num = numpy.zeros((nout, num_states+1), type_test.dtype)
for k in range(nout):
Ck = atleast_2d(C[k,:])
num[k] = poly(A - dot(B,Ck)) + (D[k]-1)*den
return num, den
def zpk2ss(z, p, k):
"""Zero-pole-gain representation to state-space representation
Parameters
----------
z, p : sequence
Zeros and poles.
k : float
System gain.
Returns
-------
A, B, C, D : ndarray
State-space matrices.
"""
return tf2ss(*zpk2tf(z,p,k))
def ss2zpk(A, B, C, D, input=0):
"""State-space representation to zero-pole-gain representation.
Parameters
----------
A, B, C, D : ndarray
State-space representation of linear system.
input : int, optional
For multiple-input systems, the input to use.
Returns
-------
z, p : sequence
Zeros and poles.
k : float
System gain.
"""
return tf2zpk(*ss2tf(A,B,C,D,input=input))
class lti(object):
"""Linear Time Invariant class which simplifies representation.
"""
def __init__(self,*args,**kwords):
"""Initialize the LTI system using either:
(numerator, denominator)
(zeros, poles, gain)
(A, B, C, D) -- state-space.
"""
N = len(args)
if N == 2: # Numerator denominator transfer function input
self.__dict__['num'], self.__dict__['den'] = normalize(*args)
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = tf2zpk(*args)
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = tf2ss(*args)
self.inputs = 1
if len(self.num.shape) > 1:
self.outputs = self.num.shape[0]
else:
self.outputs = 1
elif N == 3: # Zero-pole-gain form
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = args
self.__dict__['num'], self.__dict__['den'] = zpk2tf(*args)
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = zpk2ss(*args)
self.inputs = 1
if len(self.zeros.shape) > 1:
self.outputs = self.zeros.shape[0]
else:
self.outputs = 1
elif N == 4: # State-space form
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = abcd_normalize(*args)
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = ss2zpk(*args)
self.__dict__['num'], self.__dict__['den'] = ss2tf(*args)
self.inputs = self.B.shape[-1]
self.outputs = self.C.shape[0]
else:
raise ValueError("Needs 2, 3, or 4 arguments.")
def __setattr__(self, attr, val):
if attr in ['num','den']:
self.__dict__[attr] = val
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = \
tf2zpk(self.num, self.den)
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = \
tf2ss(self.num, self.den)
elif attr in ['zeros', 'poles', 'gain']:
self.__dict__[attr] = val
self.__dict__['num'], self.__dict__['den'] = \
zpk2tf(self.zeros,
self.poles, self.gain)
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = \
zpk2ss(self.zeros,
self.poles, self.gain)
elif attr in ['A', 'B', 'C', 'D']:
self.__dict__[attr] = val
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = \
ss2zpk(self.A, self.B,
self.C, self.D)
self.__dict__['num'], self.__dict__['den'] = \
ss2tf(self.A, self.B,
self.C, self.D)
else:
self.__dict__[attr] = val
def impulse(self, X0=None, T=None, N=None):
return impulse(self, X0=X0, T=T, N=N)
def step(self, X0=None, T=None, N=None):
return step(self, X0=X0, T=T, N=N)
def output(self, U, T, X0=None):
return lsim(self, U, T, X0=X0)
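# Illustrative usage sketch (not part of the original module): an lti instance
# can be built from any of the three representations and queried directly, for
# example from numerator/denominator coefficients:
#
#     sys = lti([1.0], [1.0, 2.0, 1.0])   # H(s) = 1 / (s + 1)**2
#     t, y = sys.step()                   # step response
#     t, h = sys.impulse()                # impulse response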
def lsim2(system, U=None, T=None, X0=None, **kwargs):
"""
Simulate output of a continuous-time linear system, by using
the ODE solver `scipy.integrate.odeint`.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like (1D or 2D), optional
An input array describing the input at each time T. Linear
interpolation is used between given times. If there are
multiple inputs, then each column of the rank-2 array
represents an input. If U is not given, the input is assumed
to be zero.
T : array_like (1D or 2D), optional
The time steps at which the input is defined and at which the
output is desired. The default is 101 evenly spaced points on
the interval [0,10.0].
X0 : array_like (1D), optional
The initial condition of the state vector. If `X0` is not
given, the initial conditions are assumed to be 0.
kwargs : dict
Additional keyword arguments are passed on to the function
odeint. See the notes below for more details.
Returns
-------
T : 1D ndarray
The time values for the output.
yout : ndarray
The response of the system.
xout : ndarray
The time-evolution of the state-vector.
Notes
-----
This function uses :func:`scipy.integrate.odeint` to solve the
system's differential equations. Additional keyword arguments
given to `lsim2` are passed on to `odeint`. See the documentation
for :func:`scipy.integrate.odeint` for the full list of arguments.
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if X0 is None:
X0 = zeros(sys.B.shape[0],sys.A.dtype)
if T is None:
# XXX T should really be a required argument, but U was
# changed from a required positional argument to a keyword,
# and T is after U in the argument list. So we either: change
# the API and move T in front of U; check here for T being
        # None and raise an exception; or assign a default value to T
# here. This code implements the latter.
T = linspace(0, 10.0, 101)
T = atleast_1d(T)
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
if U is not None:
U = atleast_1d(U)
if len(U.shape) == 1:
U = U.reshape(-1,1)
sU = U.shape
if sU[0] != len(T):
raise ValueError("U must have the same number of rows "
"as elements in T.")
if sU[1] != sys.inputs:
raise ValueError("The number of inputs in U (%d) is not "
"compatible with the number of system "
"inputs (%d)" % (sU[1], sys.inputs))
# Create a callable that uses linear interpolation to
# calculate the input at any time.
ufunc = interpolate.interp1d(T, U, kind='linear',
axis=0, bounds_error=False)
def fprime(x, t, sys, ufunc):
"""The vector field of the linear system."""
return dot(sys.A,x) + squeeze(dot(sys.B,nan_to_num(ufunc([t]))))
xout = integrate.odeint(fprime, X0, T, args=(sys, ufunc), **kwargs)
yout = dot(sys.C,transpose(xout)) + dot(sys.D,transpose(U))
else:
def fprime(x, t, sys):
"""The vector field of the linear system."""
return dot(sys.A,x)
xout = integrate.odeint(fprime, X0, T, args=(sys,), **kwargs)
yout = dot(sys.C,transpose(xout))
return T, squeeze(transpose(yout)), xout
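# Illustrative call (not from the original source): simulate the zero-input
# response of a second-order system from a non-zero initial state; extra keyword
# arguments such as rtol are forwarded to scipy.integrate.odeint:
#
#     t, y, x = lsim2(([1.0], [1.0, 2.0, 1.0]),
#                     T=linspace(0.0, 10.0, 101), X0=[1.0, 0.0], rtol=1e-8)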
def lsim(system, U, T, X0=None, interp=1):
"""
Simulate output of a continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like
An input array describing the input at each time `T`
(interpolation is assumed between given times). If there are
multiple inputs, then each column of the rank-2 array
represents an input.
T : array_like
The time steps at which the input is defined and at which the
output is desired.
X0 :
The initial conditions on the state vector (zero by default).
interp : {1, 0}
Whether to use linear (1) or zero-order hold (0) interpolation.
Returns
-------
T : 1D ndarray
Time values for the output.
yout : 1D ndarray
System response.
xout : ndarray
Time-evolution of the state-vector.
"""
# system is an lti system or a sequence
# with 2 (num, den)
# 3 (zeros, poles, gain)
# 4 (A, B, C, D)
# describing the system
# U is an input vector at times T
# if system describes multiple inputs
# then U can be a rank-2 array with the number of columns
# being the number of inputs
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
U = atleast_1d(U)
T = atleast_1d(T)
if len(U.shape) == 1:
U = U.reshape((U.shape[0],1))
sU = U.shape
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
if sU[0] != len(T):
raise ValueError("U must have the same number of rows "
"as elements in T.")
if sU[1] != sys.inputs:
raise ValueError("System does not define that many inputs.")
if X0 is None:
X0 = zeros(sys.B.shape[0], sys.A.dtype)
xout = zeros((len(T),sys.B.shape[0]), sys.A.dtype)
xout[0] = X0
A = sys.A
AT, BT = transpose(sys.A), transpose(sys.B)
dt = T[1]-T[0]
lam, v = linalg.eig(A)
vt = transpose(v)
vti = linalg.inv(vt)
GT = dot(dot(vti,diag(numpy.exp(dt*lam))),vt).astype(xout.dtype)
ATm1 = linalg.inv(AT)
ATm2 = dot(ATm1,ATm1)
I = eye(A.shape[0],dtype=A.dtype)
GTmI = GT-I
F1T = dot(dot(BT,GTmI),ATm1)
if interp:
F2T = dot(BT,dot(GTmI,ATm2)/dt - ATm1)
for k in xrange(1,len(T)):
dt1 = T[k] - T[k-1]
if dt1 != dt:
dt = dt1
GT = dot(dot(vti,diag(numpy.exp(dt*lam))),vt).astype(xout.dtype)
GTmI = GT-I
F1T = dot(dot(BT,GTmI),ATm1)
if interp:
F2T = dot(BT,dot(GTmI,ATm2)/dt - ATm1)
xout[k] = dot(xout[k-1],GT) + dot(U[k-1],F1T)
if interp:
xout[k] = xout[k] + dot((U[k]-U[k-1]),F2T)
yout = squeeze(dot(U,transpose(sys.D))) + squeeze(dot(xout,transpose(sys.C)))
return T, squeeze(yout), squeeze(xout)
def _default_response_times(A, n):
"""Compute a reasonable set of time samples for the response time.
This function is used by `impulse`, `impulse2`, `step` and `step2`
to compute the response time when the `T` argument to the function
is None.
Parameters
----------
A : ndarray
The system matrix, which is square.
n : int
The number of time samples to generate.
Returns
-------
t : ndarray
The 1-D array of length `n` of time samples at which the response
is to be computed.
"""
# Create a reasonable time interval. This could use some more work.
# For example, what is expected when the system is unstable?
vals = linalg.eigvals(A)
r = min(abs(real(vals)))
if r == 0.0:
r = 1.0
tc = 1.0 / r
t = linspace(0.0, 7*tc, n)
return t
def impulse(system, X0=None, T=None, N=None):
"""Impulse response of continuous-time system.
Parameters
----------
system : LTI class or tuple
If specified as a tuple, the system is described as
``(num, den)``, ``(zero, pole, gain)``, or ``(A, B, C, D)``.
X0 : array_like, optional
Initial state-vector. Defaults to zero.
T : array_like, optional
Time points. Computed if not given.
N : int, optional
The number of time points to compute (if `T` is not given).
Returns
-------
T : ndarray
A 1-D array of time points.
yout : ndarray
A 1-D array containing the impulse response of the system (except for
singularities at zero).
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if X0 is None:
B = sys.B
else:
B = sys.B + X0
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
h = zeros(T.shape, sys.A.dtype)
s,v = linalg.eig(sys.A)
vi = linalg.inv(v)
C = sys.C
for k in range(len(h)):
es = diag(numpy.exp(s*T[k]))
eA = (dot(dot(v,es),vi)).astype(h.dtype)
h[k] = squeeze(dot(dot(C,eA),B))
return T, h
def impulse2(system, X0=None, T=None, N=None, **kwargs):
"""
Impulse response of a single-input, continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
2 (num, den)
3 (zeros, poles, gain)
4 (A, B, C, D)
T : 1-D array_like, optional
The time steps at which the input is defined and at which the
output is desired. If `T` is not given, the function will
generate a set of time samples automatically.
X0 : 1-D array_like, optional
The initial condition of the state vector. Default: 0 (the
zero vector).
N : int, optional
Number of time points to compute. Default: 100.
kwargs : various types
Additional keyword arguments are passed on to the function
`scipy.signal.lsim2`, which in turn passes them on to
`scipy.integrate.odeint`; see the latter's documentation for
information about these arguments.
Returns
-------
T : ndarray
The time values for the output.
yout : ndarray
The output response of the system.
See Also
--------
impulse, lsim2, integrate.odeint
Notes
-----
The solution is generated by calling `scipy.signal.lsim2`, which uses
the differential equation solver `scipy.integrate.odeint`.
.. versionadded:: 0.8.0
Examples
--------
    Second order system with a repeated root: x''(t) + 2*x'(t) + x(t) = u(t)
>>> system = ([1.0], [1.0, 2.0, 1.0])
>>> t, y = impulse2(system)
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, y)
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
B = sys.B
if B.shape[-1] != 1:
raise ValueError("impulse2() requires a single-input system.")
B = B.squeeze()
if X0 is None:
X0 = zeros_like(B)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
# Move the impulse in the input to the initial conditions, and then
# solve using lsim2().
U = zeros_like(T)
ic = B + X0
Tr, Yr, Xr = lsim2(sys, U, T, ic, **kwargs)
return Tr, Yr
def step(system, X0=None, T=None, N=None):
"""Step response of continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation.
2 (num, den)
3 (zeros, poles, gain)
4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int
Number of time points to compute if `T` is not given.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step2
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
U = ones(T.shape, sys.A.dtype)
vals = lsim(sys, U, T, X0=X0)
return vals[0], vals[1]
def step2(system, X0=None, T=None, N=None, **kwargs):
"""Step response of continuous-time system.
This function is functionally the same as `scipy.signal.step`, but
it uses the function `scipy.signal.lsim2` to compute the step
response.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation.
2 (num, den)
3 (zeros, poles, gain)
4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int
Number of time points to compute if `T` is not given.
**kwargs :
Additional keyword arguments are passed on the function
`scipy.signal.lsim2`, which in turn passes them on to
:func:`scipy.integrate.odeint`. See the documentation for
:func:`scipy.integrate.odeint` for information about these
arguments.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step
Notes
-----
.. versionadded:: 0.8.0
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
U = ones(T.shape, sys.A.dtype)
vals = lsim2(sys, U, T, X0=X0, **kwargs)
return vals[0], vals[1]
| gpl-3.0 |
jseabold/scikit-learn | benchmarks/bench_plot_randomized_svd.py | 38 | 17557 | """
Benchmarks on the power iterations phase in randomized SVD.
We test on various synthetic and real datasets the effect of increasing
the number of power iterations in terms of quality of approximation
and running time. A number greater than 0 should help with noisy matrices,
which are characterized by a slow spectral decay.
We test several policies for normalizing the power iterations. Normalization
is crucial to avoid numerical issues.
The quality of the approximation is measured by the spectral norm discrepancy
between the original input matrix and the reconstructed one (by multiplying
the randomized_svd's outputs). The spectral norm is always equivalent to the
largest singular value of a matrix. (3) justifies this choice. However, one can
notice in these experiments that Frobenius and spectral norms behave
very similarly in a qualitative sense. Therefore, we suggest to run these
benchmarks with `enable_spectral_norm = False`, as Frobenius' is MUCH faster to
compute.
The benchmarks follow.
(a) plot: time vs norm, varying number of power iterations
data: many datasets
goal: compare normalization policies and study how the number of power
iterations affect time and norm
(b) plot: n_iter vs norm, varying rank of data and number of components for
randomized_SVD
data: low-rank matrices on which we control the rank
goal: study whether the rank of the matrix and the number of components
extracted by randomized SVD affect "the optimal" number of power iterations
(c) plot: time vs norm, varying datasets
data: many datasets
goal: compare default configurations
We compare the following algorithms:
- randomized_svd(..., power_iteration_normalizer='none')
- randomized_svd(..., power_iteration_normalizer='LU')
- randomized_svd(..., power_iteration_normalizer='QR')
- randomized_svd(..., power_iteration_normalizer='auto')
- fbpca.pca() from https://github.com/facebook/fbpca (if installed)
Conclusion
----------
- n_iter=2 appears to be a good default value
- power_iteration_normalizer='none' is OK if n_iter is small, otherwise LU
gives similar errors to QR but is cheaper. That's what 'auto' implements.
References
----------
(1) Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
(2) A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
(3) An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
# Author: Giorgio Patrini
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import gc
import pickle
from time import time
from collections import defaultdict
import os.path
from sklearn.utils import gen_batches
from sklearn.utils.validation import check_random_state
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import (make_low_rank_matrix,
make_sparse_uncorrelated)
from sklearn.datasets import (fetch_lfw_people,
fetch_mldata,
fetch_20newsgroups_vectorized,
fetch_olivetti_faces,
fetch_rcv1)
try:
import fbpca
fbpca_available = True
except ImportError:
fbpca_available = False
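# Illustrative sketch (not part of the original benchmark): a minimal,
# self-contained comparison of the power iteration normalizers discussed in
# the module docstring, run on a small random matrix. The helper name and the
# matrix sizes are arbitrary choices for this example; the benchmark below
# never calls it.
def _example_power_iteration_normalizers(n_samples=200, n_features=300,
                                         n_components=10, n_iter=4):
    rng = np.random.RandomState(0)
    A = rng.randn(n_samples, n_features)
    errors = {}
    for normalizer in ('none', 'LU', 'QR', 'auto'):
        U, s, V = randomized_svd(A, n_components, n_iter=n_iter,
                                 power_iteration_normalizer=normalizer,
                                 random_state=0)
        # normalized Frobenius norm of the reconstruction error
        errors[normalizer] = (np.linalg.norm(A - U.dot(np.diag(s)).dot(V)) /
                              np.linalg.norm(A))
    return errors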
# If this is enabled, tests are much slower and will crash on the large datasets
enable_spectral_norm = False
# TODO: compute approximate spectral norms with the power method as in
# Estimating the largest eigenvalues by the power and Lanczos methods with
# a random start, Jacek Kuczynski and Henryk Wozniakowski, SIAM Journal on
# Matrix Analysis and Applications, 13 (4): 1094-1122, 1992.
# This approximation is a very fast estimate of the spectral norm, but depends
# on starting random vectors.
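# Illustrative sketch of the power-method idea mentioned in the TODO above
# (not part of the benchmark and never called): estimate the largest singular
# value of A by power iteration on A.T * A from a random starting vector.
# The helper name and the number of iterations are assumptions made for this
# example.
def _approximate_spectral_norm(A, n_iter=20, random_state=0):
    rng = np.random.RandomState(random_state)
    v = rng.randn(A.shape[1])
    v /= np.linalg.norm(v)
    for _ in range(n_iter):
        # one power iteration step on A.T * A, keeping the vector normalized
        v = A.T.dot(A.dot(v))
        v /= np.linalg.norm(v)
    # ||A v|| approaches the largest singular value as v converges
    return np.linalg.norm(A.dot(v))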
# Determine when to switch to batch computation for matrix norms,
# in case the reconstructed (dense) matrix is too large
MAX_MEMORY = np.int(2e9)
# The following datasets can be downloaded manually from:
# CIFAR 10: http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
# SVHN: http://ufldl.stanford.edu/housenumbers/train_32x32.mat
CIFAR_FOLDER = "./cifar-10-batches-py/"
SVHN_FOLDER = "./SVHN/"
datasets = ['low rank matrix', 'lfw_people', 'olivetti_faces', '20newsgroups',
'MNIST original', 'CIFAR', 'a1a', 'SVHN', 'uncorrelated matrix']
big_sparse_datasets = ['big sparse matrix', 'rcv1']
def unpickle(file_name):
with open(file_name, 'rb') as fo:
return pickle.load(fo, encoding='latin1')["data"]
def handle_missing_dataset(file_folder):
if not os.path.isdir(file_folder):
print("%s file folder not found. Test skipped." % file_folder)
return 0
def get_data(dataset_name):
print("Getting dataset: %s" % dataset_name)
if dataset_name == 'lfw_people':
X = fetch_lfw_people().data
elif dataset_name == '20newsgroups':
X = fetch_20newsgroups_vectorized().data[:, :100000]
elif dataset_name == 'olivetti_faces':
X = fetch_olivetti_faces().data
elif dataset_name == 'rcv1':
X = fetch_rcv1().data
elif dataset_name == 'CIFAR':
        if handle_missing_dataset(CIFAR_FOLDER) == 0:
return
X1 = [unpickle("%sdata_batch_%d" % (CIFAR_FOLDER, i + 1))
for i in range(5)]
X = np.vstack(X1)
del X1
elif dataset_name == 'SVHN':
if handle_missing_dataset(SVHN_FOLDER) == 0:
return
X1 = sp.io.loadmat("%strain_32x32.mat" % SVHN_FOLDER)['X']
X2 = [X1[:, :, :, i].reshape(32 * 32 * 3) for i in range(X1.shape[3])]
X = np.vstack(X2)
del X1
del X2
elif dataset_name == 'low rank matrix':
X = make_low_rank_matrix(n_samples=500, n_features=np.int(1e4),
effective_rank=100, tail_strength=.5,
random_state=random_state)
elif dataset_name == 'uncorrelated matrix':
X, _ = make_sparse_uncorrelated(n_samples=500, n_features=10000,
random_state=random_state)
elif dataset_name == 'big sparse matrix':
sparsity = np.int(1e6)
size = np.int(1e6)
small_size = np.int(1e4)
data = np.random.normal(0, 1, np.int(sparsity/10))
data = np.repeat(data, 10)
row = np.random.uniform(0, small_size, sparsity)
col = np.random.uniform(0, small_size, sparsity)
X = sp.sparse.csr_matrix((data, (row, col)), shape=(size, small_size))
del data
del row
del col
else:
X = fetch_mldata(dataset_name).data
return X
def plot_time_vs_s(time, norm, point_labels, title):
plt.figure()
colors = ['g', 'b', 'y']
for i, l in enumerate(sorted(norm.keys())):
        if l != "fbpca":
plt.plot(time[l], norm[l], label=l, marker='o', c=colors.pop())
else:
plt.plot(time[l], norm[l], label=l, marker='^', c='red')
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(label, xy=(x, y), xytext=(0, -20),
textcoords='offset points', ha='right', va='bottom')
plt.legend(loc="upper right")
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("running time [s]")
def scatter_time_vs_s(time, norm, point_labels, title):
plt.figure()
size = 100
for i, l in enumerate(sorted(norm.keys())):
        if l != "fbpca":
plt.scatter(time[l], norm[l], label=l, marker='o', c='b', s=size)
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(label, xy=(x, y), xytext=(0, -80),
textcoords='offset points', ha='right',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"),
va='bottom', size=11, rotation=90)
else:
plt.scatter(time[l], norm[l], label=l, marker='^', c='red', s=size)
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(label, xy=(x, y), xytext=(0, 30),
textcoords='offset points', ha='right',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"),
va='bottom', size=11, rotation=90)
plt.legend(loc="best")
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("running time [s]")
def plot_power_iter_vs_s(power_iter, s, title):
plt.figure()
for l in sorted(s.keys()):
plt.plot(power_iter, s[l], label=l, marker='o')
plt.legend(loc="lower right", prop={'size': 10})
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("n_iter")
def svd_timing(X, n_comps, n_iter, n_oversamples,
power_iteration_normalizer='auto', method=None):
"""
Measure time for decomposition
"""
print("... running SVD ...")
    if method != 'fbpca':
gc.collect()
t0 = time()
U, mu, V = randomized_svd(X, n_comps, n_oversamples, n_iter,
power_iteration_normalizer,
random_state=random_state, transpose=False)
call_time = time() - t0
else:
gc.collect()
t0 = time()
# There is a different convention for l here
U, mu, V = fbpca.pca(X, n_comps, raw=True, n_iter=n_iter,
l=n_oversamples+n_comps)
call_time = time() - t0
return U, mu, V, call_time
def norm_diff(A, norm=2, msg=True):
"""
    Compute the norm of the matrix A (typically the difference between an
    original matrix and its low-rank reconstruction from the randomized SVD).
    norm: 2 => spectral; 'fro' => Frobenius
"""
if msg:
print("... computing %s norm ..." % norm)
if norm == 2:
# s = sp.linalg.norm(A, ord=2) # slow
value = sp.sparse.linalg.svds(A, k=1, return_singular_vectors=False)
else:
if sp.sparse.issparse(A):
value = sp.sparse.linalg.norm(A, ord=norm)
else:
value = sp.linalg.norm(A, ord=norm)
return value
def scalable_frobenius_norm_discrepancy(X, U, s, V):
# if the input is not too big, just call scipy
if X.shape[0] * X.shape[1] < MAX_MEMORY:
A = X - U.dot(np.diag(s).dot(V))
return norm_diff(A, norm='fro')
print("... computing fro norm by batches...")
batch_size = 1000
Vhat = np.diag(s).dot(V)
cum_norm = .0
for batch in gen_batches(X.shape[0], batch_size):
M = X[batch, :] - U[batch, :].dot(Vhat)
        cum_norm += norm_diff(M, norm='fro', msg=False) ** 2
return np.sqrt(cum_norm)
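# Illustrative check (not part of the original benchmark): the batched
# computation above relies on the identity
#     ||A||_F = sqrt(sum_i ||A_i||_F ** 2)
# over row blocks A_i, so block norms must be squared before summing.
# This helper simply verifies the identity on a small random matrix; the
# helper name and sizes are arbitrary choices for this example.
def _check_batched_frobenius_identity(n_rows=1000, n_cols=50, batch_size=128):
    rng = np.random.RandomState(0)
    A = rng.randn(n_rows, n_cols)
    acc = 0.0
    for batch in gen_batches(n_rows, batch_size):
        # accumulate squared Frobenius norms of the row blocks
        acc += np.linalg.norm(A[batch, :], ord='fro') ** 2
    return np.allclose(np.sqrt(acc), np.linalg.norm(A, ord='fro'))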
def bench_a(X, dataset_name, power_iter, n_oversamples, n_comps):
all_time = defaultdict(list)
if enable_spectral_norm:
all_spectral = defaultdict(list)
X_spectral_norm = norm_diff(X, norm=2, msg=False)
all_frobenius = defaultdict(list)
X_fro_norm = norm_diff(X, norm='fro', msg=False)
for pi in power_iter:
for pm in ['none', 'LU', 'QR']:
print("n_iter = %d on sklearn - %s" % (pi, pm))
U, s, V, time = svd_timing(X, n_comps, n_iter=pi,
power_iteration_normalizer=pm,
n_oversamples=n_oversamples)
label = "sklearn - %s" % pm
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if fbpca_available:
print("n_iter = %d on fbca" % (pi))
U, s, V, time = svd_timing(X, n_comps, n_iter=pi,
power_iteration_normalizer=pm,
n_oversamples=n_oversamples,
method='fbpca')
label = "fbpca"
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if enable_spectral_norm:
title = "%s: spectral norm diff vs running time" % (dataset_name)
plot_time_vs_s(all_time, all_spectral, power_iter, title)
title = "%s: Frobenius norm diff vs running time" % (dataset_name)
plot_time_vs_s(all_time, all_frobenius, power_iter, title)
def bench_b(power_list):
n_samples, n_features = 1000, 10000
data_params = {'n_samples': n_samples, 'n_features': n_features,
'tail_strength': .7, 'random_state': random_state}
dataset_name = "low rank matrix %d x %d" % (n_samples, n_features)
ranks = [10, 50, 100]
if enable_spectral_norm:
all_spectral = defaultdict(list)
all_frobenius = defaultdict(list)
for rank in ranks:
X = make_low_rank_matrix(effective_rank=rank, **data_params)
if enable_spectral_norm:
X_spectral_norm = norm_diff(X, norm=2, msg=False)
X_fro_norm = norm_diff(X, norm='fro', msg=False)
for n_comp in [np.int(rank/2), rank, rank*2]:
label = "rank=%d, n_comp=%d" % (rank, n_comp)
print(label)
for pi in power_list:
U, s, V, _ = svd_timing(X, n_comp, n_iter=pi, n_oversamples=2,
power_iteration_normalizer='LU')
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if enable_spectral_norm:
title = "%s: spectral norm diff vs n power iteration" % (dataset_name)
        plot_power_iter_vs_s(power_list, all_spectral, title)
    title = "%s: Frobenius norm diff vs n power iteration" % (dataset_name)
    plot_power_iter_vs_s(power_list, all_frobenius, title)
def bench_c(datasets, n_comps):
all_time = defaultdict(list)
if enable_spectral_norm:
all_spectral = defaultdict(list)
all_frobenius = defaultdict(list)
for dataset_name in datasets:
X = get_data(dataset_name)
if X is None:
continue
if enable_spectral_norm:
X_spectral_norm = norm_diff(X, norm=2, msg=False)
X_fro_norm = norm_diff(X, norm='fro', msg=False)
n_comps = np.minimum(n_comps, np.min(X.shape))
label = "sklearn"
print("%s %d x %d - %s" %
(dataset_name, X.shape[0], X.shape[1], label))
U, s, V, time = svd_timing(X, n_comps, n_iter=2, n_oversamples=10,
method=label)
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if fbpca_available:
label = "fbpca"
print("%s %d x %d - %s" %
(dataset_name, X.shape[0], X.shape[1], label))
U, s, V, time = svd_timing(X, n_comps, n_iter=2, n_oversamples=2,
method=label)
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if len(all_time) == 0:
raise ValueError("No tests ran. Aborting.")
if enable_spectral_norm:
title = "normalized spectral norm diff vs running time"
scatter_time_vs_s(all_time, all_spectral, datasets, title)
title = "normalized Frobenius norm diff vs running time"
scatter_time_vs_s(all_time, all_frobenius, datasets, title)
if __name__ == '__main__':
random_state = check_random_state(1234)
power_iter = np.linspace(0, 6, 7, dtype=int)
n_comps = 50
for dataset_name in datasets:
X = get_data(dataset_name)
if X is None:
continue
print(" >>>>>> Benching sklearn and fbpca on %s %d x %d" %
(dataset_name, X.shape[0], X.shape[1]))
bench_a(X, dataset_name, power_iter, n_oversamples=2,
n_comps=np.minimum(n_comps, np.min(X.shape)))
print(" >>>>>> Benching on simulated low rank matrix with variable rank")
bench_b(power_iter)
print(" >>>>>> Benching sklearn and fbpca default configurations")
bench_c(datasets + big_sparse_datasets, n_comps)
plt.show()
| bsd-3-clause |
glouppe/scikit-learn | benchmarks/bench_random_projections.py | 397 | 8900 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transformer):
    gc.collect()
    clf = clone(transformer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
# Make some random data with uniformly located non-zero entries and
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
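# Illustrative sketch (not part of the original benchmark): the two
# transformers timed below, applied to a small dense matrix. With
# n_components='auto' they derive the subspace size from the
# Johnson-Lindenstrauss lemma, mirrored here by calling
# johnson_lindenstrauss_min_dim directly. Sizes are arbitrary assumptions for
# this example and the helper is never called by the benchmark.
def _example_random_projection(n_samples=100, n_features=10000, eps=0.5):
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)
    n_components = johnson_lindenstrauss_min_dim(n_samples=n_samples, eps=eps)
    dense_proj = GaussianRandomProjection(n_components=n_components,
                                          random_state=0).fit_transform(X)
    sparse_proj = SparseRandomProjection(n_components=n_components, eps=eps,
                                         random_state=0).fit_transform(X)
    return dense_proj.shape, sparse_proj.shape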
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
    print('Dataset statistics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
            time_to_fit, time_to_transform = bench_scikit_transformer(
                X, transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
| bsd-3-clause |
petosegan/scikit-learn | examples/cluster/plot_cluster_comparison.py | 246 | 4684 | """
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example aims at showing characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. The last dataset is an example of a 'null'
situation for clustering: the data is homogeneous, and
there is no good clustering.
While these examples give some intuition about the algorithms,
this intuition might not apply to very high dimensional data.
The results could be improved by tweaking the parameters for
each clustering strategy, for instance setting the number of
clusters for the methods that need this parameter
specified. Note that affinity propagation has a tendency to
create many clusters. Thus in this example its two parameters
(damping and per-point preference) were set to mitigate this
behavior.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
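# Illustrative sketch (not part of the original example): affinity propagation
# with the damping and strongly negative per-point preference mentioned in the
# docstring, run on a small two-moons dataset. The values mirror those used
# further down; the helper is never called by this script.
def _example_affinity_propagation(n_samples=300):
    from sklearn import cluster, datasets
    from sklearn.preprocessing import StandardScaler
    X, _ = datasets.make_moons(n_samples=n_samples, noise=.05)
    X = StandardScaler().fit_transform(X)
    # damping close to 1 and a negative preference keep the cluster count low
    af = cluster.AffinityPropagation(damping=.9, preference=-200).fit(X)
    return len(np.unique(af.labels_))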
np.random.seed(0)
# Generate datasets. We choose a size big enough to see the scalability
# of the algorithms, but not so big that the running times become too long
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
clustering_names = [
'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
'SpectralClustering', 'Ward', 'AgglomerativeClustering',
'DBSCAN', 'Birch']
plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
datasets = [noisy_circles, noisy_moons, blobs, no_structure]
for i_dataset, dataset in enumerate(datasets):
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# create clustering estimators
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=2)
ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(n_clusters=2,
eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=.2)
affinity_propagation = cluster.AffinityPropagation(damping=.9,
preference=-200)
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock", n_clusters=2,
connectivity=connectivity)
birch = cluster.Birch(n_clusters=2)
clustering_algorithms = [
two_means, affinity_propagation, ms, spectral, ward, average_linkage,
dbscan, birch]
for name, algorithm in zip(clustering_names, clustering_algorithms):
# predict cluster memberships
t0 = time.time()
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
# plot
plt.subplot(4, len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
if hasattr(algorithm, 'cluster_centers_'):
centers = algorithm.cluster_centers_
center_colors = colors[:len(centers)]
plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
| bsd-3-clause |
PanDAWMS/pilot | RunJob.py | 1 | 98438 | # Class definition:
# RunJob
# This is the main RunJob class; RunJobEvent etc will inherit from this class
# Note: at the moment, this class is essentially the old runJob module turned object oriented.
# The class will later become RunJobNormal, i.e. responsible for running normal PanDA jobs.
# At that point a new RunJob top class will be created containing methods that have been
# identified as common between the various sub classes.
# Instances are generated with RunJobFactory
# Subclasses should implement all needed methods prototyped in this class
# Note: not compatible with Singleton Design Pattern due to the subclassing
# Standard python modules
import os, sys, commands, time
import traceback
import atexit, signal
from optparse import OptionParser
from json import loads
# Pilot modules
import Site, pUtil, Job, Node, RunJobUtilities
import Mover as mover
from pUtil import tolog, readpar, createLockFile, getDatasetDict, getSiteInformation,\
tailPilotErrorDiag, getCmtconfig, getExperiment, getGUID, getWriteToInputFilenames
from JobRecovery import JobRecovery
from FileStateClient import updateFileStates, dumpFileStates
from ErrorDiagnosis import ErrorDiagnosis # import here to avoid issues seen at BU with missing module
from PilotErrors import PilotErrors
from shutil import copy2
from FileHandling import tail, getExtension, extractOutputFiles, getDestinationDBlockItems, getDirectAccess, writeFile, readFile
from EventRanges import downloadEventRanges
from processes import get_cpu_consumption_time
# remove logguid, debuglevel - not needed
# relabelled -h, queuename to -b (debuglevel not used)
class RunJob(object):
# private data members
__runjob = "RunJob" # String defining the RunJob class
__instance = None # Boolean used by subclasses to become a Singleton
__error = PilotErrors() # PilotErrors object
# __appdir = "/usatlas/projects/OSG" # Default software installation directory
# __debugLevel = 0 # 0: debug info off, 1: display function name when called, 2: full debug info
__failureCode = None # set by signal handler when user/batch system kills the job
__globalPilotErrorDiag = "" # global pilotErrorDiag used with signal handler (only)
__globalErrorCode = 0 # global error code used with signal handler (only)
__inputDir = "" # location of input files (source for mv site mover)
__logguid = None # guid for the log file
__outputDir = "" # location of output files (destination for mv site mover)
__pilot_initdir = "" # location of where the pilot is untarred and started
__pilotlogfilename = "pilotlog.txt" # default pilotlog filename
__pilotserver = "localhost" # default server
__pilotport = 88888 # default port
__proxycheckFlag = True # True (default): perform proxy validity checks, False: no check
__pworkdir = "/tmp" # site work dir used by the parent
# __queuename = "" # PanDA queue NOT NEEDED
# __sitename = "testsite" # PanDA site NOT NEEDED
__stageinretry = 1 # number of stage-in tries
__stageoutretry = 1 # number of stage-out tries
# __testLevel = 0 # test suite control variable (0: no test, 1: put error, 2: ...) NOT USED
# __workdir = "/tmp" # NOT USED
__cache = "" # Cache URL, e.g. used by LSST
__pandaserver = "" # Full PanDA server url incl. port and sub dirs
__recovery = False
__jobStateFile = None
__yodaNodes = None
__yodaQueue = None
corruptedFiles = []
# Getter and setter methods
def getExperiment(self):
""" Getter for __experiment """
return self.__experiment
def getFailureCode(self):
""" Getter for __failureCode """
return self.__failureCode
def setFailureCode(self, code):
""" Setter for __failureCode """
self.__failureCode = code
def getGlobalPilotErrorDiag(self):
""" Getter for __globalPilotErrorDiag """
return self.__globalPilotErrorDiag
def setGlobalPilotErrorDiag(self, pilotErrorDiag):
""" Setter for __globalPilotErrorDiag """
self.__globalPilotErrorDiag = pilotErrorDiag
def getGlobalErrorCode(self):
""" Getter for __globalErrorCode """
return self.__globalErrorCode
def setGlobalErrorCode(self, code):
""" Setter for __globalErrorCode """
self.__globalErrorCode = code
def setCache(self, cache):
""" Setter for __cache """
self.__cache = cache
def getInputDir(self):
""" Getter for __inputDir """
return self.__inputDir
def setInputDir(self, inputDir):
""" Setter for __inputDir """
self.__inputDir = inputDir
def getLogGUID(self):
""" Getter for __logguid """
return self.__logguid
def getOutputDir(self):
""" Getter for __outputDir """
return self.__outputDir
def getPilotInitDir(self):
""" Getter for __pilot_initdir """
return self.__pilot_initdir
def setPilotInitDir(self, pilot_initdir):
""" Setter for __pilot_initdir """
self.__pilot_initdir = pilot_initdir
def getPilotLogFilename(self):
""" Getter for __pilotlogfilename """
return self.__pilotlogfilename
def getPilotServer(self):
""" Getter for __pilotserver """
return self.__pilotserver
def getPilotPort(self):
""" Getter for __pilotport """
return self.__pilotport
def getProxyCheckFlag(self):
""" Getter for __proxycheckFlag """
return self.__proxycheckFlag
def getParentWorkDir(self):
""" Getter for __pworkdir """
return self.__pworkdir
def setParentWorkDir(self, pworkdir):
""" Setter for __pworkdir """
self.__pworkdir = pworkdir
def getStageInRetry(self):
""" Getter for __stageinretry """
return self.__stageinretry
def getStageOutRetry(self):
""" Getter for __stageoutretry """
return self.__stageoutretry
def setStageInRetry(self, stageinretry):
""" Setter for __stageinretry """
self.__stageinretry = stageinretry
def getCache(self):
""" Getter for __cache """
return self.__cache
def getRecovery(self):
return self.__recovery
def getJobStateFile(self):
return self.__jobStateFile
def setLogGUID(self, logguid):
""" Setter for __logguid """
self.__logguid = logguid
def getYodaNodes(self):
try:
if self.__yodaNodes is None:
return None
nodes = int(self.__yodaNodes)
return nodes
except:
tolog(traceback.format_exc())
return None
def getYodaQueue(self):
try:
if self.__yodaQueue is None:
return None
return self.__yodaQueue
except:
tolog(traceback.format_exc())
return None
def getPanDAServer(self):
""" Getter for __pandaserver """
return self.__pandaserver
def setPanDAServer(self, pandaserver):
""" Setter for __pandaserver """
self.__pandaserver = pandaserver
# Required methods
def __init__(self):
""" Default initialization """
# e.g. self.__errorLabel = errorLabel
pass
def getRunJob(self):
""" Return a string with the module name """
return self.__runjob
def argumentParser(self):
""" Argument parser for the RunJob module """
# Return variables
appdir = None
queuename = None
sitename = None
workdir = None
parser = OptionParser()
parser.add_option("-a", "--appdir", dest="appdir",
help="The local path to the applications directory", metavar="APPDIR")
parser.add_option("-b", "--queuename", dest="queuename",
help="Queue name", metavar="QUEUENAME")
parser.add_option("-d", "--workdir", dest="workdir",
help="The local path to the working directory of the payload", metavar="WORKDIR")
parser.add_option("-g", "--inputdir", dest="inputDir",
help="Location of input files to be transferred by the mv site mover", metavar="INPUTDIR")
parser.add_option("-i", "--logfileguid", dest="logguid",
help="Log file guid", metavar="GUID")
parser.add_option("-k", "--pilotlogfilename", dest="pilotlogfilename",
help="The name of the pilot log file", metavar="PILOTLOGFILENAME")
parser.add_option("-l", "--pilotinitdir", dest="pilot_initdir",
help="The local path to the directory where the pilot was launched", metavar="PILOT_INITDIR")
parser.add_option("-m", "--outputdir", dest="outputDir",
help="Destination of output files to be transferred by the mv site mover", metavar="OUTPUTDIR")
parser.add_option("-o", "--parentworkdir", dest="pworkdir",
help="Path to the work directory of the parent process (i.e. the pilot)", metavar="PWORKDIR")
parser.add_option("-s", "--sitename", dest="sitename",
help="The name of the site where the job is to be run", metavar="SITENAME")
parser.add_option("-w", "--pilotserver", dest="pilotserver",
help="The URL of the pilot TCP server (localhost) WILL BE RETIRED", metavar="PILOTSERVER")
parser.add_option("-p", "--pilotport", dest="pilotport",
help="Pilot TCP server port (default: 88888)", metavar="PORT")
parser.add_option("-t", "--proxycheckflag", dest="proxycheckFlag",
help="True (default): perform proxy validity checks, False: no check", metavar="PROXYCHECKFLAG")
parser.add_option("-x", "--stageinretries", dest="stageinretry",
help="The number of stage-in retries", metavar="STAGEINRETRY")
#parser.add_option("-B", "--filecatalogregistration", dest="fileCatalogRegistration",
# help="True (default): perform file catalog registration, False: no catalog registration", metavar="FILECATALOGREGISTRATION")
parser.add_option("-E", "--stageoutretries", dest="stageoutretry",
help="The number of stage-out retries", metavar="STAGEOUTRETRY")
parser.add_option("-F", "--experiment", dest="experiment",
help="Current experiment (default: ATLAS)", metavar="EXPERIMENT")
parser.add_option("-R", "--recovery", dest="recovery",
help="Run in recovery mode", metavar="RECOVERY")
parser.add_option("-S", "--jobStateFile", dest="jobStateFile",
help="Job State File", metavar="JOBSTATEFILE")
parser.add_option("-N", "--yodaNodes", dest="yodaNodes",
help="Maximum nodes Yoda starts with", metavar="YODANODES")
parser.add_option("-Q", "--yodaQueue", dest="yodaQueue",
help="The queue yoda will be send to", metavar="YODAQUEUE")
parser.add_option("-H", "--cache", dest="cache",
help="Cache URL", metavar="CACHE")
parser.add_option("-W", "--pandaserver", dest="pandaserver",
help="The full URL of the PanDA server (incl. port)", metavar="PANDASERVER")
# options = {'experiment': 'ATLAS'}
try:
(options, args) = parser.parse_args()
except Exception,e:
tolog("!!WARNING!!3333!! Exception caught:" % (e))
print options.experiment
else:
if options.appdir:
# self.__appdir = options.appdir
appdir = options.appdir
if options.experiment:
self.__experiment = options.experiment
if options.logguid:
self.__logguid = options.logguid
if options.inputDir:
self.__inputDir = options.inputDir
if options.pilot_initdir:
self.__pilot_initdir = options.pilot_initdir
if options.pilotlogfilename:
self.__pilotlogfilename = options.pilotlogfilename
if options.pilotserver:
self.__pilotserver = options.pilotserver
if options.pandaserver:
self.__pandaserver = options.pandaserver
if options.proxycheckFlag:
if options.proxycheckFlag.lower() == "false":
self.__proxycheckFlag = False
else:
self.__proxycheckFlag = True
else:
self.__proxycheckFlag = True
if options.pworkdir:
self.__pworkdir = options.pworkdir
if options.outputDir:
self.__outputDir = options.outputDir
if options.pilotport:
try:
self.__pilotport = int(options.pilotport)
except Exception, e:
tolog("!!WARNING!!3232!! Exception caught: %s" % (e))
# self.__queuename is not needed
if options.queuename:
queuename = options.queuename
if options.sitename:
sitename = options.sitename
if options.stageinretry:
try:
self.__stageinretry = int(options.stageinretry)
except Exception, e:
tolog("!!WARNING!!3232!! Exception caught: %s" % (e))
if options.stageoutretry:
try:
self.__stageoutretry = int(options.stageoutretry)
except Exception, e:
tolog("!!WARNING!!3232!! Exception caught: %s" % (e))
if options.workdir:
workdir = options.workdir
if options.cache:
self.__cache = options.cache
self.__recovery = options.recovery
self.__jobStateFile = options.jobStateFile
if options.yodaNodes:
self.__yodaNodes = options.yodaNodes
if options.yodaQueue:
self.__yodaQueue = options.yodaQueue
return sitename, appdir, workdir, queuename
def getRunJobFileName(self):
""" Return the filename of the module """
fullpath = sys.modules[self.__module__].__file__
# Note: the filename above will contain both full path, and might end with .pyc, fix this
filename = os.path.basename(fullpath)
if filename.endswith(".pyc"):
filename = filename[:-1] # remove the trailing 'c'
return filename
def allowLoopingJobKiller(self):
""" Should the pilot search for looping jobs? """
# The pilot has the ability to monitor the payload work directory. If there are no updated files within a certain
        # time limit, the pilot will consider the job as stuck (looping) and will kill it. The looping time limits are set
# in environment.py (see e.g. loopingLimitDefaultProd)
return True
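    def getLatestModificationTime(self, workdir):
        """ Illustrative sketch only, not called by the pilot: return the most
        recent file modification time found under the payload work directory.
        Comparing this value against the looping time limit is, in essence,
        how a stuck (looping) payload can be detected. The method name is an
        assumption made for this example. """
        latest = 0
        for root, _, files in os.walk(workdir):
            for name in files:
                try:
                    latest = max(latest, os.path.getmtime(os.path.join(root, name)))
                except OSError:
                    pass
        return latest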
def cleanup(self, job, rf=None):
""" Cleanup function """
# 'rf' is a list that will contain the names of the files that could be transferred
# In case of transfer problems, all remaining files will be found and moved
# to the data directory for later recovery.
try:
if int(job.result[1]) > 0 and (job.result[2] is None or job.result[2] == '' or int(job.result[2]) == 0):
job.result[2] = PilotErrors.ERR_RUNJOBEXC
except:
tolog(traceback.format_exc())
tolog("********************************************************")
tolog(" This job ended with (trf,pilot) exit code of (%d,%d)" % (job.result[1], job.result[2]))
tolog("********************************************************")
# clean up the pilot wrapper modules
pUtil.removePyModules(job.workdir)
if os.path.isdir(job.workdir):
os.chdir(job.workdir)
# remove input files from the job workdir
remFiles = job.inFiles
for inf in remFiles:
if inf and inf != 'NULL' and os.path.isfile("%s/%s" % (job.workdir, inf)): # non-empty string and not NULL
try:
os.remove("%s/%s" % (job.workdir, inf))
except Exception,e:
tolog("!!WARNING!!3000!! Ignore this Exception when deleting file %s: %s" % (inf, str(e)))
pass
# only remove output files if status is not 'holding'
# in which case the files should be saved for the job recovery.
# the job itself must also have finished with a zero trf error code
# (data will be moved to another directory to keep it out of the log file)
# always copy the metadata-<jobId>.xml to the site work dir
# WARNING: this metadata file might contain info about files that were not successfully moved to the SE
# it will be regenerated by the job recovery for the cases where there are output files in the datadir
try:
tolog('job.workdir is %s pworkdir is %s ' % (job.workdir, self.__pworkdir))
copy2("%s/metadata-%s.xml" % (job.workdir, job.jobId), "%s/metadata-%s.xml" % (self.__pworkdir, job.jobId))
except Exception, e:
tolog("Warning: Could not copy metadata-%s.xml to site work dir - ddm Adder problems will occure in case of job recovery" % (job.jobId))
tolog('job.workdir is %s pworkdir is %s ' % (job.workdir, self.__pworkdir))
if job.result[0] == 'holding' and job.result[1] == 0:
try:
# create the data directory
os.makedirs(job.datadir)
except OSError, e:
tolog("!!WARNING!!3000!! Could not create data directory: %s, %s" % (job.datadir, str(e)))
else:
# find all remaining files in case 'rf' is not empty
remaining_files = []
moved_files_list = []
try:
if rf != None:
moved_files_list = RunJobUtilities.getFileNamesFromString(rf[1])
remaining_files = RunJobUtilities.getRemainingFiles(moved_files_list, job.outFiles)
except Exception, e:
tolog("!!WARNING!!3000!! Illegal return value from Mover: %s, %s" % (str(rf), str(e)))
remaining_files = job.outFiles
# move all remaining output files to the data directory
nr_moved = 0
for _file in remaining_files:
try:
os.system("mv %s %s" % (_file, job.datadir))
except OSError, e:
tolog("!!WARNING!!3000!! Failed to move file %s (abort all)" % (_file))
break
else:
nr_moved += 1
tolog("Moved %d/%d output file(s) to: %s" % (nr_moved, len(remaining_files), job.datadir))
# remove all successfully copied files from the local directory
nr_removed = 0
for _file in moved_files_list:
try:
os.system("rm %s" % (_file))
except OSError, e:
tolog("!!WARNING!!3000!! Failed to remove output file: %s, %s" % (_file, e))
else:
nr_removed += 1
tolog("Removed %d output file(s) from local dir" % (nr_removed))
# copy the PoolFileCatalog.xml for non build jobs
if not pUtil.isBuildJob(remaining_files):
_fname = os.path.join(job.workdir, "PoolFileCatalog.xml")
tolog("Copying %s to %s" % (_fname, job.datadir))
try:
copy2(_fname, job.datadir)
except Exception, e:
tolog("!!WARNING!!3000!! Could not copy PoolFileCatalog.xml to data dir - expect ddm Adder problems during job recovery")
# remove all remaining output files from the work directory
# (a successfully copied file should already have been removed by the Mover)
rem = False
for inf in job.outFiles:
if inf and inf != 'NULL' and os.path.isfile("%s/%s" % (job.workdir, inf)): # non-empty string and not NULL
try:
os.remove("%s/%s" % (job.workdir, inf))
except Exception,e:
tolog("!!WARNING!!3000!! Ignore this Exception when deleting file %s: %s" % (inf, str(e)))
pass
else:
tolog("Lingering output file removed: %s" % (inf))
rem = True
if not rem:
tolog("All output files already removed from local dir")
tolog("Payload cleanup has finished")
def sysExit(self, job, rf=None):
'''
wrapper around sys.exit
        rf is the return string from Mover::put containing a list of files that were not transferred
'''
self.cleanup(job, rf=rf)
sys.stderr.close()
tolog("RunJob (payload wrapper) has finished")
# change to sys.exit?
os._exit(job.result[2]) # pilotExitCode, don't confuse this with the overall pilot exit code,
# which doesn't get reported back to panda server anyway
def failJob(self, transExitCode, pilotExitCode, job, ins=None, pilotErrorDiag=None, docleanup=True):
""" set the fail code and exit """
if docleanup:
self.cleanup(job, rf=None)
if job.eventServiceMerge:
if self.corruptedFiles:
job.corruptedFiles = ','.join([e['lfn'] for e in self.corruptedFiles])
job.result[2] = self.corruptedFiles[0]['status_code']
else:
pilotExitCode = PilotErrors.ERR_ESRECOVERABLE
job.setState(["failed", transExitCode, pilotExitCode])
if pilotErrorDiag:
job.pilotErrorDiag = pilotErrorDiag
tolog("Will now update local pilot TCP server")
rt = RunJobUtilities.updatePilotServer(job, self.__pilotserver, self.__pilotport, final=True)
if ins:
ec = pUtil.removeFiles(job.workdir, ins)
if docleanup:
self.sysExit(job)
def isMultiTrf(self, parameterList):
""" Will we execute multiple jobs? """
if len(parameterList) > 1:
multi_trf = True
else:
multi_trf = False
return multi_trf
def setup(self, job, jobSite, thisExperiment):
""" prepare the setup and get the run command list """
# start setup time counter
t0 = time.time()
ec = 0
runCommandList = []
# split up the job parameters to be able to loop over the tasks
jobParameterList = job.jobPars.split("\n")
jobHomePackageList = job.homePackage.split("\n")
jobTrfList = job.trf.split("\n")
job.release = thisExperiment.formatReleaseString(job.release)
releaseList = thisExperiment.getRelease(job.release)
tolog("Number of transformations to process: %s" % len(jobParameterList))
multi_trf = self.isMultiTrf(jobParameterList)
# verify that the multi-trf job is setup properly
ec, job.pilotErrorDiag, releaseList = RunJobUtilities.verifyMultiTrf(jobParameterList, jobHomePackageList, jobTrfList, releaseList)
if ec > 0:
return ec, runCommandList, job, multi_trf
os.chdir(jobSite.workdir)
tolog("Current job workdir is %s" % os.getcwd())
# setup the trf(s)
_i = 0
_stdout = job.stdout
_stderr = job.stderr
_first = True
for (_jobPars, _homepackage, _trf, _swRelease) in map(None, jobParameterList, jobHomePackageList, jobTrfList, releaseList):
tolog("Preparing setup %d/%d" % (_i + 1, len(jobParameterList)))
# reset variables
job.jobPars = _jobPars
job.homePackage = _homepackage
job.trf = _trf
job.release = _swRelease
if multi_trf:
job.stdout = _stdout.replace(".txt", "_%d.txt" % (_i + 1))
job.stderr = _stderr.replace(".txt", "_%d.txt" % (_i + 1))
# post process copysetup variable in case of directIn/useFileStager
_copysetup = readpar('copysetup')
_copysetupin = readpar('copysetupin')
if "--directIn" in job.jobPars or "--useFileStager" in job.jobPars or _copysetup.count('^') == 5 or _copysetupin.count('^') == 5:
# only need to update the queuedata file once
if _first:
RunJobUtilities.updateCopysetups(job.jobPars)
_first = False
# setup the trf
ec, job.pilotErrorDiag, cmd, job.spsetup, job.JEM, job.cmtconfig = thisExperiment.getJobExecutionCommand(job, jobSite, self.__pilot_initdir)
if ec > 0:
# setup failed
break
# add the setup command to the command list
runCommandList.append(cmd)
_i += 1
job.stdout = _stdout
job.stderr = _stderr
job.timeSetup = int(time.time() - t0)
tolog("Total setup time: %d s" % (job.timeSetup))
return ec, runCommandList, job, multi_trf
def stageIn_new(self,
job,
jobSite,
analysisJob=None, # not used: job.isAnalysisJob() should be used instead
files=None,
pfc_name="PoolFileCatalog.xml"):
"""
Perform the stage-in
Do transfer input files
new site movers based implementation workflow
"""
tolog("Preparing for get command [stageIn_new]")
infiles = [e.lfn for e in job.inData]
tolog("Input file(s): (%s in total)" % len(infiles))
for ind, lfn in enumerate(infiles, 1):
tolog("%s. %s" % (ind, lfn))
if not infiles:
tolog("No input files for this job .. skip stage-in")
return job, infiles, None, False
t0 = os.times()
job.result[2], job.pilotErrorDiag, _dummy, FAX_dictionary = mover.get_data_new(job, jobSite, stageinTries=self.__stageinretry, proxycheck=False, workDir=self.__pworkdir, pfc_name=pfc_name, files=files)
t1 = os.times()
# record failed stagein files
for e in job.inData:
if e.status == 'error':
failed_file = {'lfn': e.lfn, 'status': e.status, 'status_code': e.status_code, 'status_message': e.status_message}
self.corruptedFiles.append(failed_file)
job.timeStageIn = int(round(t1[4] - t0[4]))
usedFAXandDirectIO = FAX_dictionary.get('usedFAXandDirectIO', False)
statusPFCTurl = None
return job, infiles, statusPFCTurl, usedFAXandDirectIO
@mover.use_newmover(stageIn_new)
def stageIn(self, job, jobSite, analysisJob, pfc_name="PoolFileCatalog.xml", prefetcher=False):
""" Perform the stage-in """
ec = 0
statusPFCTurl = None
usedFAXandDirectIO = False
# Prepare the input files (remove non-valid names) if there are any
ins, job.filesizeIn, job.checksumIn = RunJobUtilities.prepareInFiles(job.inFiles, job.filesizeIn, job.checksumIn)
if ins and not prefetcher:
tolog("Preparing for get command")
# Get the file access info (only useCT is needed here)
si = getSiteInformation(self.getExperiment())
useCT, oldPrefix, newPrefix = si.getFileAccessInfo(job.transferType)
# Transfer input files
tin_0 = os.times()
ec, job.pilotErrorDiag, statusPFCTurl, FAX_dictionary = \
mover.get_data(job, jobSite, ins, self.__stageinretry, analysisJob=analysisJob, usect=useCT,\
pinitdir=self.__pilot_initdir, proxycheck=False, inputDir=self.__inputDir, workDir=self.__pworkdir, pfc_name=pfc_name)
if ec != 0:
job.result[2] = ec
tin_1 = os.times()
job.timeStageIn = int(round(tin_1[4] - tin_0[4]))
# Extract any FAX info from the dictionary
job.filesWithoutFAX = FAX_dictionary.get('N_filesWithoutFAX', 0)
job.filesWithFAX = FAX_dictionary.get('N_filesWithFAX', 0)
job.bytesWithoutFAX = FAX_dictionary.get('bytesWithoutFAX', 0)
job.bytesWithFAX = FAX_dictionary.get('bytesWithFAX', 0)
usedFAXandDirectIO = FAX_dictionary.get('usedFAXandDirectIO', False)
elif prefetcher:
tolog("No need to stage in files since prefetcher will be used")
return job, ins, statusPFCTurl, usedFAXandDirectIO
def getTrfExitInfo(self, exitCode, workdir):
""" Get the trf exit code and info from job report if possible """
exitAcronym = ""
exitMsg = ""
# does the job report exist?
extension = getExtension(alternative='pickle')
if extension.lower() == "json":
_filename = "jobReport.%s" % (extension)
else:
_filename = "jobReportExtract.%s" % (extension)
filename = os.path.join(workdir, _filename)
if os.path.exists(filename):
tolog("Found job report: %s" % (filename))
# wait a few seconds to make sure the job report is finished
tolog("Taking a 5s nap to make sure the job report is finished")
time.sleep(5)
# first backup the jobReport to the job workdir since it will be needed later
# (the current location will disappear since it will be tarred up in the jobs' log file)
d = os.path.join(workdir, '..')
try:
copy2(filename, os.path.join(d, _filename))
except Exception, e:
tolog("Warning: Could not backup %s to %s: %s" % (_filename, d, e))
else:
tolog("Backed up %s to %s" % (_filename, d))
# search for the exit code
try:
f = open(filename, "r")
except Exception, e:
tolog("!!WARNING!!1112!! Failed to open job report: %s" % (e))
else:
if extension.lower() == "json":
from json import load
else:
from pickle import load
data = load(f)
# extract the exit code and info
_exitCode = self.extractDictionaryObject("exitCode", data)
if _exitCode:
if _exitCode == 0 and exitCode != 0:
tolog("!!WARNING!!1111!! Detected inconsistency in %s: exitcode listed as 0 but original trf exit code was %d (using original error code)" %\
(filename, exitCode))
else:
exitCode = _exitCode
_exitAcronym = self.extractDictionaryObject("exitAcronym", data)
if _exitAcronym:
exitAcronym = _exitAcronym
_exitMsg = self.extractDictionaryObject("exitMsg", data)
if _exitMsg:
exitMsg = _exitMsg
f.close()
tolog("Trf exited with:")
tolog("...exitCode=%d" % (exitCode))
tolog("...exitAcronym=%s" % (exitAcronym))
tolog("...exitMsg=%s" % (exitMsg))
else:
tolog("Job report not found: %s" % (filename))
return exitCode, exitAcronym, exitMsg
def extractDictionaryObject(self, obj, dictionary):
""" Extract an object from a dictionary """
_obj = None
try:
_obj = dictionary[obj]
except Exception, e:
tolog("Object %s not found in dictionary" % (obj))
else:
tolog('Extracted \"%s\"=%s from dictionary' % (obj, _obj))
return _obj
def getUtilitySubprocess(self, thisExperiment, cmd, pid, job):
""" Return/execute the utility subprocess if required """
utility_subprocess = None
if thisExperiment.shouldExecuteUtility():
try:
mem_cmd = thisExperiment.getUtilityCommand(job_command=cmd, pid=pid, release=job.release, homePackage=job.homePackage, cmtconfig=job.cmtconfig, trf=job.trf, workdir=job.workdir)
if mem_cmd != "":
utility_subprocess = self.getSubprocess(thisExperiment, mem_cmd)
if utility_subprocess:
try:
tolog("Process id of utility: %d" % (utility_subprocess.pid))
except Exception, e:
tolog("!!WARNING!!3436!! Exception caught: %s" % (e))
else:
tolog("Could not launch utility since the command path does not exist")
except Exception, e:
tolog("!!WARNING!!5454!! Exception caught: %s" % (e))
utility_subprocess = None
else:
tolog("Not required to run utility")
return utility_subprocess
def getBenchmarkSubprocess(self, node, coreCount, workdir, sitename):
""" Return/execute the benchmark subprocess if required """
# Output json: /tmp/cern-benchmark_$USER/bmk_tmp/result_profile.json
benchmark_subprocess = None
# run benchmark test if required by experiment site information object
si = getSiteInformation(self.getExperiment())
if si.shouldExecuteBenchmark():
thisExperiment = getExperiment(self.getExperiment())
cmd = si.getBenchmarkCommand(cloud=readpar('cloud'), cores=coreCount, workdir=workdir)
benchmark_subprocess = self.getSubprocess(thisExperiment, cmd)
if benchmark_subprocess:
try:
tolog("Process id of benchmark suite: %d" % (benchmark_subprocess.pid))
except Exception, e:
tolog("!!WARNING!!3436!! Exception caught: %s" % (e))
else:
tolog("Not required to run the benchmark suite")
return benchmark_subprocess
def isDirectAccess(self, analysisJob, transferType=None):
""" determine if direct access should be used """
directIn, directInType = getDirectAccess()
if not analysisJob and transferType and transferType != "direct":
directIn = False
return directIn
def replaceLFNsWithTURLs(self, cmd, fname, inFiles, workdir, writetofile=""):
"""
Replace all LFNs with full TURLs.
This function is used with direct access. Athena requires a full TURL instead of LFN.
"""
tolog("inside replaceLFNsWithTURLs()")
turl_dictionary = {} # { LFN: TURL, ..}
if os.path.exists(fname):
file_info_dictionary = mover.getFileInfoDictionaryFromXML(fname)
tolog("file_info_dictionary=%s" % file_info_dictionary)
for inputFile in inFiles:
if inputFile in file_info_dictionary:
turl = file_info_dictionary[inputFile][0]
turl_dictionary[inputFile] = turl
if inputFile in cmd:
if turl.startswith('root://') and turl not in cmd:
cmd = cmd.replace(inputFile, turl)
tolog("Replaced '%s' with '%s' in the run command" % (inputFile, turl))
else:
tolog("!!WARNING!!3434!! inputFile=%s not in dictionary=%s" % (inputFile, file_info_dictionary))
tolog("writetofile=%s" % writetofile)
tolog("turl_dictionary=%s" % turl_dictionary)
# replace the LFNs with TURLs in the writeToFile input file list (if it exists)
if writetofile and turl_dictionary:
filenames = getWriteToInputFilenames(writetofile)
tolog("filenames=%s" % filenames)
for fname in filenames:
new_lines = []
path = os.path.join(workdir, fname)
if os.path.exists(path):
f = readFile(path)
tolog("readFile=%s" % f)
for line in f.split('\n'):
fname = os.path.basename(line)
if fname in turl_dictionary:
turl = turl_dictionary[fname]
new_lines.append(turl)
else:
if line:
new_lines.append(line)
lines = '\n'.join(new_lines)
if lines:
writeFile(path, lines)
tolog("lines=%s" % lines)
else:
tolog("!!WARNING!!4546!! File does not exist: %s" % path)
else:
tolog("!!WARNING!!4545!! Could not find file: %s (cannot locate TURLs for direct access)" % fname)
return cmd
def executePayload(self, thisExperiment, runCommandList, job):
""" execute the payload """
# do not hide the proxy for PandaMover since it needs it or for sites that has sc.proxy = donothide
# if 'DDM' not in jobSite.sitename and readpar('proxy') != 'donothide':
# # create the proxy guard object (must be created here before the sig2exc())
# proxyguard = ProxyGuard()
#
# # hide the proxy
# hP_ret = proxyguard.hideProxy()
# if not hP_ret:
# tolog("Warning: Proxy exposed to payload")
# If clone job, make sure that the events should be processed
if job.cloneJob == "runonce":
try:
                # If the event is still available, then go ahead and run the payload
message = downloadEventRanges(job.jobId, job.jobsetID, job.taskID, url=self.__pandaserver)
# Create a list of event ranges from the downloaded message
event_ranges = self.extractEventRanges(message)
# Are there any event ranges?
if event_ranges == []:
tolog("!!WARNING!!2424!! This clone job was already executed")
exitMsg = "Already executed clone job"
res_tuple = (1, exitMsg)
res = (res_tuple[0], res_tuple[1], exitMsg)
job.result[0] = exitMsg
job.result[1] = 0 # transExitCode
job.result[2] = self.__error.ERR_EXECUTEDCLONEJOB # Pilot error code
return res, job, False, 0
else:
tolog("Ok to execute clone job")
except Exception, e:
tolog("!1WARNING!!2323!! Exception caught: %s" % (e))
# Run the payload process, which could take days to finish
t0 = os.times()
path = os.path.join(job.workdir, 't0_times.txt')
if writeFile(path, str(t0)):
tolog("Wrote %s to file %s" % (str(t0), path))
else:
tolog("!!WARNING!!3344!! Failed to write t0 to file, will not be able to calculate CPU consumption time on the fly")
res_tuple = (0, 'Undefined')
multi_trf = self.isMultiTrf(runCommandList)
_stdout = job.stdout
_stderr = job.stderr
# Loop over all run commands (only >1 for multi-trfs)
current_job_number = 0
getstatusoutput_was_interrupted = False
number_of_jobs = len(runCommandList)
for cmd in runCommandList:
current_job_number += 1
# Create the stdout/err files
if multi_trf:
job.stdout = _stdout.replace(".txt", "_%d.txt" % (current_job_number))
job.stderr = _stderr.replace(".txt", "_%d.txt" % (current_job_number))
file_stdout, file_stderr = self.getStdoutStderrFileObjects(stdoutName=job.stdout, stderrName=job.stderr)
if not (file_stdout and file_stderr):
res_tuple = (1, "Could not open stdout/stderr files, piping not possible")
tolog("!!WARNING!!2222!! %s" % (res_tuple[1]))
break
try:
# Add the full job command to the job_setup.sh file
to_script = cmd.replace(";", ";\n")
thisExperiment.updateJobSetupScript(job.workdir, to_script=to_script)
# For direct access in prod jobs, we need to substitute the input file names with the corresponding TURLs
try:
analysisJob = job.isAnalysisJob()
directIn = self.isDirectAccess(analysisJob, transferType=job.transferType)
tolog("analysisJob=%s" % analysisJob)
tolog("directIn=%s" % directIn)
if not analysisJob and directIn:
# replace the LFNs with TURLs in the job command
# (and update the writeToFile input file list if it exists)
_fname = os.path.join(job.workdir, "PoolFileCatalog.xml")
cmd = self.replaceLFNsWithTURLs(cmd, _fname, job.inFiles, job.workdir, writetofile=job.writetofile)
except Exception, e:
tolog("Caught exception: %s" % e)
tolog("Executing job command %d/%d" % (current_job_number, number_of_jobs))
# Hack to replace Archive_tf
# if job.trf == 'Archive_tf.py' or job.trf == 'Dummy_tf.py':
# cmd = 'sleep 1'
# tolog('Will execute a dummy sleep command instead of %s' % job.trf)
# Start the subprocess
main_subprocess = self.getSubprocess(thisExperiment, cmd, stdout=file_stdout, stderr=file_stderr)
if main_subprocess:
path = os.path.join(job.workdir, 'cpid.txt')
if writeFile(path, str(main_subprocess.pid)):
tolog("Wrote cpid=%s to file %s" % (main_subprocess.pid, path))
time.sleep(2)
# Start the utility if required
utility_subprocess = self.getUtilitySubprocess(thisExperiment, cmd, main_subprocess.pid, job)
utility_subprocess_launches = 1
# Loop until the main subprocess has finished
while main_subprocess.poll() is None:
# Take a short nap
time.sleep(5)
# Make sure that the utility subprocess is still running
if utility_subprocess:
# Take another short nap
time.sleep(5)
if not utility_subprocess.poll() is None:
# If poll() returns anything but None it means that the subprocess has ended - which it should not have done by itself
# Unless it was killed by the Monitor along with all other subprocesses
if not os.path.exists(os.path.join(job.workdir, "MEMORYEXCEEDED")) and not os.path.exists(os.path.join(job.workdir, "JOBWILLBEKILLED")):
if utility_subprocess_launches <= 5:
tolog("!!WARNING!!4343!! Dectected crashed utility subprocess - will restart it")
utility_subprocess = self.getUtilitySubprocess(thisExperiment, cmd, main_subprocess.pid, job)
utility_subprocess_launches += 1
elif utility_subprocess_launches <= 6:
tolog("!!WARNING!!4343!! Dectected crashed utility subprocess - too many restarts, will not restart again")
utility_subprocess_launches += 1
utility_subprocess = None
else:
pass
else:
tolog("Detected lockfile MEMORYEXCEEDED: will not restart utility")
utility_subprocess = None
# Stop the utility
if utility_subprocess:
utility_subprocess.send_signal(signal.SIGUSR1)
tolog("Terminated the utility subprocess")
_nap = 10
tolog("Taking a short nap (%d s) to allow the utility to finish writing to the summary file" % (_nap))
time.sleep(_nap)
# Copy the output JSON to the pilots init dir
_path = "%s/%s" % (job.workdir, thisExperiment.getUtilityJSONFilename())
if os.path.exists(_path):
try:
copy2(_path, self.__pilot_initdir)
except Exception, e:
tolog("!!WARNING!!2222!! Caught exception while trying to copy JSON files: %s" % (e))
else:
tolog("Copied %s to pilot init dir" % (_path))
else:
tolog("File %s was not created" % (_path))
# Handle main subprocess errors
try:
stdout = open(job.stdout, 'r')
if main_subprocess:
res_tuple = (main_subprocess.returncode, tail(stdout))
else:
res_tuple = (1, "Popen process does not exist (see stdout/err)")
except Exception, e:
tolog("!!WARNING!!3002!! Failed during tail operation: %s" % (e))
else:
tolog("Tail:\n%s" % (res_tuple[1]))
stdout.close()
else:
res_tuple = (1, "Popen ended prematurely (payload command failed to execute, see stdout/err)")
tolog("!!WARNING!!3001!! %s" % (res_tuple[1]))
except Exception, e:
tolog("!!WARNING!!3000!! Failed to run command: %s" % (e))
getstatusoutput_was_interrupted = True
if self.__failureCode:
job.result[2] = self.__failureCode
tolog("!!FAILED!!3000!! Failure code: %d" % (self.__failureCode))
break
else:
if res_tuple[0] == 0:
tolog("Job command %d/%d finished" % (current_job_number, number_of_jobs))
else:
tolog("Job command %d/%d failed: res = %s" % (current_job_number, number_of_jobs, str(res_tuple)))
break
t1 = os.times()
cpuconsumptiontime = get_cpu_consumption_time(t0)
job.cpuConsumptionTime = int(cpuconsumptiontime)
job.cpuConsumptionUnit = 's'
job.cpuConversionFactor = 1.0
tolog("Job CPU usage: %s %s" % (job.cpuConsumptionTime, job.cpuConsumptionUnit))
tolog("Job CPU conversion factor: %1.10f" % (job.cpuConversionFactor))
job.timeExe = int(round(t1[4] - t0[4]))
tolog("Original exit code: %d" % (res_tuple[0]))
tolog("Exit code: %d (returned from OS)" % (res_tuple[0]%255))
# check the job report for any exit code that should replace the res_tuple[0]
res0, exitAcronym, exitMsg = self.getTrfExitInfo(res_tuple[0], job.workdir)
res = (res0, res_tuple[1], exitMsg)
# dump an extract of the payload output
if number_of_jobs > 1:
_stdout = job.stdout
_stderr = job.stderr
_stdout = _stdout.replace(".txt", "_N.txt")
_stderr = _stderr.replace(".txt", "_N.txt")
tolog("NOTE: For %s output, see files %s, %s (N = [1, %d])" % (job.payload, _stdout, _stderr, number_of_jobs))
else:
tolog("NOTE: For %s output, see files %s, %s" % (job.payload, job.stdout, job.stderr))
# JEM job-end callback
try:
from JEMstub import notifyJobEnd2JEM
notifyJobEnd2JEM(job, tolog)
except:
pass # don't care
# restore the proxy
#if hP_ret:
# rP_ret = proxyguard.restoreProxy()
# if not rP_ret:
# tolog("Warning: Problems with storage can occur since proxy could not be restored")
# else:
# hP_ret = False
# tolog("ProxyGuard has finished successfully")
return res, job, getstatusoutput_was_interrupted, current_job_number
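# Minimal, self-contained sketch of the poll-and-restart watchdog pattern used in the payload
# loop above (restart a crashed helper process while the main subprocess is still running).
# The commands are placeholders, not the actual pilot utility:
#
#   import subprocess, time
#
#   main = subprocess.Popen(["sleep", "30"])
#   helper = subprocess.Popen(["sleep", "5"])
#   restarts = 0
#   while main.poll() is None:                          # None means main is still running
#       time.sleep(5)
#       if helper and helper.poll() is not None and restarts < 5:
#           helper = subprocess.Popen(["sleep", "5"])   # relaunch the crashed helper
#           restarts += 1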
def moveTrfMetadata(self, workdir, jobId):
""" rename and copy the trf metadata """
oldMDName = "%s/metadata.xml" % (workdir)
_filename = "metadata-%s.xml.PAYLOAD" % (jobId)
newMDName = "%s/%s" % (workdir, _filename)
try:
os.rename(oldMDName, newMDName)
except:
tolog("Warning: Could not open the original %s file, but harmless, pass it" % (oldMDName))
pass
else:
tolog("Renamed %s to %s" % (oldMDName, newMDName))
# now move it to the pilot work dir
try:
copy2(newMDName, "%s/%s" % (self.__pworkdir, _filename))
except Exception, e:
tolog("Warning: Could not copy %s to site work dir: %s" % (_filename, str(e)))
else:
tolog("Metadata was transferred to site work dir: %s/%s" % (self.__pworkdir, _filename))
def createFileMetadata(self, outFiles, job, outsDict, dsname, datasetDict, sitename, analysisJob=False, fromJSON=False):
""" create the metadata for the output + log files """
# Note: if file names and guids were extracted from the jobReport.json file, then the getOutFilesGuids() should not be called
ec = 0
# get/assign guids to the output files
if outFiles:
if not pUtil.isBuildJob(outFiles) and not fromJSON:
ec, job.pilotErrorDiag, job.outFilesGuids = RunJobUtilities.getOutFilesGuids(job.outFiles, job.workdir, self.__experiment)
if ec:
# missing PoolFileCatalog (only error code from getOutFilesGuids)
return ec, job, None
else:
tolog("Will not use PoolFileCatalog to get guid")
else:
tolog("This job has no output files")
# get the file sizes and checksums for the local output files
# WARNING: any errors that occur in getOutputFileInfo() are lost
ec, pilotErrorDiag, fsize, checksum = pUtil.getOutputFileInfo(list(outFiles), "adler32", skiplog=True, logFile=job.logFile)
if ec != 0:
tolog("!!FAILED!!2999!! %s" % (pilotErrorDiag))
self.failJob(job.result[1], ec, job, pilotErrorDiag=pilotErrorDiag)
if job.tarFileGuid and len(job.tarFileGuid.strip()):
guid = job.tarFileGuid
elif self.__logguid:
guid = self.__logguid
else:
guid = job.tarFileGuid
# create preliminary metadata (no metadata yet about log file - added later in pilot.py)
_fname = "%s/metadata-%s.xml" % (job.workdir, job.jobId)
try:
_status = pUtil.PFCxml(job.experiment, _fname, list(job.outFiles), fguids=job.outFilesGuids, fntag="lfn", alog=job.logFile, alogguid=guid,\
fsize=fsize, checksum=checksum, analJob=analysisJob, logToOS=job.putLogToOS)
except Exception, e:
pilotErrorDiag = "PFCxml failed due to problematic XML: %s" % (e)
tolog("!!WARNING!!1113!! %s" % (pilotErrorDiag))
self.failJob(job.result[1], error.ERR_MISSINGGUID, job, pilotErrorDiag=pilotErrorDiag)
else:
if not _status:
pilotErrorDiag = "Missing guid(s) for output file(s) in metadata"
tolog("!!FAILED!!2999!! %s" % (pilotErrorDiag))
self.failJob(job.result[1], error.ERR_MISSINGGUID, job, pilotErrorDiag=pilotErrorDiag)
tolog("..............................................................................................................")
tolog("Created %s with:" % (_fname))
tolog(".. log : %s (to be transferred)" % (job.logFile))
tolog(".. log guid : %s" % (guid))
tolog(".. out files : %s" % str(job.outFiles))
tolog(".. out file guids : %s" % str(job.outFilesGuids))
tolog(".. fsize : %s" % str(fsize))
tolog(".. checksum : %s" % str(checksum))
tolog("..............................................................................................................")
# convert the preliminary metadata-<jobId>.xml file to OutputFiles-<jobId>.xml for NG and for CERNVM
# note: for CERNVM this is only really needed when CoPilot is used
if os.environ.has_key('Nordugrid_pilot') or sitename == 'CERNVM':
if RunJobUtilities.convertMetadata4NG(os.path.join(job.workdir, job.outputFilesXML), _fname, outsDict, dsname, datasetDict):
tolog("Metadata has been converted to NG/CERNVM format")
else:
job.pilotErrorDiag = "Could not convert metadata to NG/CERNVM format"
tolog("!!WARNING!!1999!! %s" % (job.pilotErrorDiag))
# try to build a file size and checksum dictionary for the output files
# outputFileInfo: {'a.dat': (fsize, checksum), ...}
# e.g.: file size for file a.dat: outputFileInfo['a.dat'][0]
# checksum for file a.dat: outputFileInfo['a.dat'][1]
try:
# remove the log entries
_fsize = fsize[1:]
_checksum = checksum[1:]
outputFileInfo = dict(zip(job.outFiles, zip(_fsize, _checksum)))
except Exception, e:
tolog("!!WARNING!!2993!! Could not create output file info dictionary: %s" % str(e))
outputFileInfo = {}
else:
tolog("Output file info dictionary created: %s" % str(outputFileInfo))
return ec, job, outputFileInfo
def isArchive(self, zipmap):
"""
Is the archive zipmap populated?
"""
if zipmap:
archive = True
else:
archive = False
return archive
def getDatasets(self, job, zipmap=None):
""" get the datasets for the output files """
# get the default dataset
if job.destinationDblock and job.destinationDblock[0] != 'NULL' and job.destinationDblock[0] != ' ':
dsname = job.destinationDblock[0]
else:
dsname = "%s-%s-%s" % (time.localtime()[0:3]) # pass it a random name
# create the dataset dictionary
# (if None, the dsname above will be used for all output files)
archive = self.isArchive(zipmap)
datasetDict = getDatasetDict(job.outFiles, job.destinationDblock, job.logFile, job.logDblock, archive=archive)
if datasetDict:
tolog("Dataset dictionary has been verified")
else:
tolog("Dataset dictionary could not be verified, output files will go to: %s" % (dsname))
return dsname, datasetDict
def stageOut_new(self,
job,
jobSite,
outs, # somehow prepared validated output files list (logfiles not included)
analysisJob, # not used, --> job.isAnalysisJob() should be used instead
dsname, # default dataset name to be used if file.destinationDblock is not set
datasetDict, # validated dict to resolve dataset name: datasetDict = dict(zip(outputFiles, destinationDblock)) + (logFile, logFileDblock)
outputFileInfo # validated dict: outputFileInfo = dict(zip(job.outFiles, zip(_fsize, _checksum)))
# can be calculated in Mover directly while transferring??
):
"""
perform the stage-out
:return: (rcode, job, rf, latereg=False) # latereg is always False
note: returning `job` is redundant since it is modified in place (passed by reference)
"""
# warning: in the main workflow, if the jobReport is used as the source of output files it completely overwrites job.outFiles; this is presumably wrong behaviour - job.outFiles should be extended instead.
# extend job.outData from job.outFiles (consider extra files extractOutputFilesFromJSON in the main workflow)
# populate guid and dataset values for job.outData
# copy all extra files from job.outFiles into structured job.outData
job._sync_outdata() # temporary work-around, reuse old workflow that populates job.outFilesGuids
try:
t0 = os.times()
rc, job.pilotErrorDiag, rf, _dummy, job.filesNormalStageOut, job.filesAltStageOut = mover.put_data_new(job, jobSite, stageoutTries=self.__stageoutretry, log_transfer=False, pinitdir=self.__pilot_initdir)
t1 = os.times()
job.timeStageOut = int(round(t1[4] - t0[4]))
except Exception, e:
t1 = os.times()
job.timeStageOut = int(round(t1[4] - t0[4]))
error = "Put function can not be called for staging out: %s, trace=%s" % (e, traceback.format_exc())
tolog(error)
rc = PilotErrors.ERR_PUTFUNCNOCALL
job.setState(["holding", job.result[1], rc])
return rc, job, None, False
tolog("Put function returned code: %s" % rc)
if rc:
if job.pilotErrorDiag:
job.pilotErrorDiag = job.pilotErrorDiag[-256:]
# check if the job is recoverable?
_state, _msg = "failed", "FAILED"
if PilotErrors.isRecoverableErrorCode(rc) and '(unrecoverable)' not in job.pilotErrorDiag:
_state, _msg = "holding", "WARNING"
job.setState([_state, job.result[1], rc])
tolog(" -- %s: %s" % (_msg, PilotErrors.getErrorStr(rc)))
else:
job.setState(["finished", 0, 0])
# create a weak lockfile meaning that file transfer worked
# (useful for job recovery if activated) in the job workdir
createLockFile(True, jobSite.workdir, lockfile="ALLFILESTRANSFERRED")
# create another lockfile in the site workdir since a transfer failure can still occur during the log transfer
# and a later recovery attempt will fail (job workdir will not exist at that time)
createLockFile(True, self.__pworkdir, lockfile="ALLFILESTRANSFERRED")
return rc, job, rf, False
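# Illustrative sketch of the datasetDict structure described in the stageOut_new() parameter
# comments above (the real dictionary is built by getDatasetDict()); names are placeholders:
#
#   datasetDict = dict(zip(outputFiles, destinationDblock))
#   datasetDict[logFile] = logFileDblock    # the log file maps to its own dataset block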
@mover.use_newmover(stageOut_new)
def stageOut(self, job, jobSite, outs, analysisJob, dsname, datasetDict, outputFileInfo):
""" perform the stage-out """
error = PilotErrors()
pilotErrorDiag = ""
rc = 0
latereg = False
rf = None
# generate the xml for the output files and the site mover
pfnFile = "OutPutFileCatalog.xml"
try:
_status = pUtil.PFCxml(job.experiment, pfnFile, outs, fguids=job.outFilesGuids, fntag="pfn")
except Exception, e:
job.pilotErrorDiag = "PFCxml failed due to problematic XML: %s" % (e)
tolog("!!WARNING!!1113!! %s" % (job.pilotErrorDiag))
return error.ERR_MISSINGGUID, job, rf, latereg
else:
if not _status:
job.pilotErrorDiag = "Metadata contains missing guid(s) for output file(s)"
tolog("!!WARNING!!2999!! %s" % (job.pilotErrorDiag))
return error.ERR_MISSINGGUID, job, rf, latereg
tolog("Using the newly-generated %s/%s for put operation" % (job.workdir, pfnFile))
# the cmtconfig is needed by at least the xrdcp site mover
cmtconfig = getCmtconfig(job.cmtconfig)
rs = "" # return string from put_data with filename in case of transfer error
tin_0 = os.times()
try:
rc, job.pilotErrorDiag, rf, rs, job.filesNormalStageOut, job.filesAltStageOut, os_bucket_id = mover.mover_put_data("xmlcatalog_file:%s" % (pfnFile), dsname, jobSite.sitename,\
jobSite.computingElement, analysisJob=analysisJob, pinitdir=self.__pilot_initdir, proxycheck=self.__proxycheckFlag, datasetDict=datasetDict,\
outputDir=self.__outputDir, outputFileInfo=outputFileInfo, stageoutTries=self.__stageoutretry, cmtconfig=cmtconfig, job=job)
tin_1 = os.times()
job.timeStageOut = int(round(tin_1[4] - tin_0[4]))
except Exception, e:
tin_1 = os.times()
job.timeStageOut = int(round(tin_1[4] - tin_0[4]))
if 'format_exc' in traceback.__all__:
trace = traceback.format_exc()
pilotErrorDiag = "Put function can not be called for staging out: %s, %s" % (str(e), trace)
else:
tolog("traceback.format_exc() not available in this python version")
pilotErrorDiag = "Put function can not be called for staging out: %s" % (str(e))
tolog("!!WARNING!!3000!! %s" % (pilotErrorDiag))
rc = error.ERR_PUTFUNCNOCALL
job.setState(["holding", job.result[1], rc])
else:
if job.pilotErrorDiag != "":
if job.pilotErrorDiag.startswith("Put error:"):
pre = ""
else:
pre = "Put error: "
job.pilotErrorDiag = pre + tailPilotErrorDiag(job.pilotErrorDiag, size=256-len("pilot: Put error: "))
tolog("Put function returned code: %d" % (rc))
if rc != 0:
# remove any trailing "\r" or "\n" (there can be two of them)
if rs is not None:
rs = rs.rstrip()
tolog("Error string: %s" % (rs))
# is the job recoverable?
if error.isRecoverableErrorCode(rc):
_state = "holding"
_msg = "WARNING"
else:
_state = "failed"
_msg = "FAILED"
# look for special error in the error string
if rs == "Error: string Limit exceeded 250":
tolog("!!%s!!3000!! Put error: file name string limit exceeded 250" % (_msg))
job.setState([_state, job.result[1], error.ERR_LRCREGSTRSIZE])
else:
job.setState([_state, job.result[1], rc])
tolog("!!%s!!1212!! %s" % (_msg, error.getErrorStr(rc)))
else:
# set preliminary finished (may be overwritten below)
job.setState(["finished", 0, 0])
# create a weak lockfile meaning that file transfer worked
# (useful for job recovery if activated) in the job workdir
createLockFile(True, jobSite.workdir, lockfile="ALLFILESTRANSFERRED")
# create another lockfile in the site workdir since a transfer failure can still occur during the log transfer
# and a later recovery attempt will fail (job workdir will not exist at that time)
createLockFile(True, self.__pworkdir, lockfile="ALLFILESTRANSFERRED")
if job.result[0] == "holding" and '(unrecoverable)' in job.pilotErrorDiag:
job.result[0] = "failed"
tolog("!!WARNING!!2999!! HOLDING state changed to FAILED since error is unrecoverable")
return rc, job, rf, latereg
def copyInputForFiles(self, workdir):
""" """
try:
cmd = "cp %s/inputFor_* %s" % (self.__pilot_initdir, workdir)
tolog("Executing command: %s" % (cmd))
out = commands.getoutput(cmd)
except IOError, e:
pass
tolog(out)
def getStdoutStderrFileObjects(self, stdoutName="stdout.txt", stderrName="stderr.txt"):
""" Create stdout/err file objects """
try:
stdout = open(os.path.join(os.getcwd(), stdoutName), "w")
stderr = open(os.path.join(os.getcwd(), stderrName), "w")
except Exception, e:
tolog("!!WARNING!!3330!! Failed to open stdout/err files: %s" % (e))
stdout = None
stderr = None
return stdout, stderr
def getSubprocess(self, thisExperiment, runCommand, stdout=None, stderr=None):
""" Execute a command as a subprocess """
# Execute and return the subprocess object
return thisExperiment.getSubprocess(runCommand, stdout=stdout, stderr=stderr)
# Methods used by event service RunJob* modules ..............................................................
def stripSetupCommand(self, cmd, trfName):
""" Remove the trf part of the setup command """
location = cmd.find(trfName)
return cmd[:location]
def executeMakeRunEventCollectionScript(self, cmd, eventcollection_filename):
""" Define and execute the event collection script """
cmd += "get_files -jo %s" % (eventcollection_filename)
tolog("Execute command: %s" % (cmd))
# WARNING: PUT A TIMER AROUND THIS COMMAND
rc, rs = commands.getstatusoutput(cmd)
return rc, rs
def prependMakeRunEventCollectionScript(self, input_file, output_file, eventcollection_filename):
""" Prepend the event collection script """
status = False
eventcollection_filename_mod = ""
with open(eventcollection_filename) as f1:
eventcollection_filename_mod = eventcollection_filename.replace(".py",".2.py")
with open(eventcollection_filename_mod, "w") as f2:
f2.write("EvtMax = -1\n")
f2.write("In = [ \'%s\' ]\n" % (input_file))
f2.write("Out = \'%s\'\n" % (output_file))
for line in f1:
f2.write(line)
f2.close()
f1.close()
status = True
return status, eventcollection_filename_mod
def executeTAGFileCommand(self, cmd, eventcollection_filename_mod):
""" Execute the TAG file creation script using athena """
cmd += "athena.py %s >MakeRunEventCollection-stdout.txt" % (eventcollection_filename_mod)
tolog("Executing command: %s" % (cmd))
# WARNING: PUT A TIMER AROUND THIS COMMAND
rc, rs = commands.getstatusoutput(cmd)
return rc, rs
def swapAthenaProcNumber(self, swap_value):
""" Swap the current ATHENA_PROC_NUMBER so that it does not upset the job """
# Note: only needed during TAG file creation
athena_proc_number = 0
try:
athena_proc_number = int(os.environ['ATHENA_PROC_NUMBER'])
except Exception, e:
tolog("ATHENA_PROC_NUMBER not defined, setting it to: %s" % (swap_value))
os.environ['ATHENA_PROC_NUMBER'] = str(swap_value)
else:
if swap_value == 0:
del os.environ['ATHENA_PROC_NUMBER']
tolog("Unset ATHENA_PROC_NUMBER")
else:
os.environ['ATHENA_PROC_NUMBER'] = str(swap_value)
tolog("ATHENA_PROC_NUMBER swapped from \'%d\' to \'%d\'" % (athena_proc_number, swap_value))
return athena_proc_number
def createTAGFile(self, jobExecutionCommand, trfName, inFiles, eventcollection_filename):
""" Create a TAG file """
tag_file = ""
tag_file_guid = getGUID()
# We cannot have ATHENA_PROC_NUMBER set to a value larger than 1, since that will
# activate AthenaMP. Reset it for now, and swap it back at the end of this method
athena_proc_number = self.swapAthenaProcNumber(0)
# Remove everything after the trf command from the job execution command
cmd = self.stripSetupCommand(jobExecutionCommand, trfName)
tolog("Stripped command: %s" % (cmd))
# Define and execute the event collection script
if cmd != "":
rc, rs = self.executeMakeRunEventCollectionScript(cmd, eventcollection_filename)
# Prepend the event collection script
if rc == 0:
input_file = inFiles[0]
tag_file = input_file + ".TAG"
status, eventcollection_filename_mod = self.prependMakeRunEventCollectionScript(input_file, tag_file, eventcollection_filename)
# Finally create the TAG file
if status:
rc, rs = self.executeTAGFileCommand(cmd, eventcollection_filename_mod)
if rc != 0:
tolog("!!WARNING!!3337!! Failed to create TAG file: rc=%d, rs=%s" % (rc, rs))
tag_file = ""
else:
tolog("!!WARNING!!3339!! Failed to download %s: rc=%d, rs=%s " % (eventcollection_filename, rc, rs))
else:
tolog("!!WARNING!!3330!! Failed to strip the job execution command, cannot create TAG file")
# Now swap the ATHENA_PROC_NUMBER since it is needed for activating AthenaMP
dummy = self.swapAthenaProcNumber(athena_proc_number)
return tag_file, tag_file_guid
def extractEventRanges(self, message):
""" Extract all event ranges from the server message """
# This function will return a list of event range dictionaries
event_ranges = []
try:
event_ranges = loads(message)
except Exception, e:
tolog("Could not extract any event ranges: %s" % (e))
return event_ranges
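# Illustrative only: the server message is assumed to be a JSON-encoded list of event-range
# dictionaries, e.g. something along the lines of
#
#   message = '[{"eventRangeID": "1-2-3", "startEvent": 1, "lastEvent": 10, "LFN": "f.root"}]'
#   event_ranges = loads(message)    # -> list of dicts, one per event range
#
# The exact keys are defined by the PanDA server and are not guaranteed by this sketch.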
def unzipStagedFiles(self, job):
for inputZipFile in job.inputZipFiles:
inputZipFile = os.path.join(job.workdir, inputZipFile)
command = "tar -xf %s -C %s" % (inputZipFile, job.workdir)
tolog("Unzip file: %s" % command)
status, output = commands.getstatusoutput(command)
tolog("status: %s, output: %s\n" % (status, output))
# (end event service methods) ................................................................................
def handleAdditionalOutFiles(self, job, analysisJob):
""" Update output file lists in case there are additional output files in the jobReport """
# Note: only for production jobs
fromJSON = False
extracted_output_files, extracted_guids = extractOutputFiles(analysisJob, job.workdir, job.allowNoOutput, job.outFiles, job.outFilesGuids)
if extracted_output_files != []:
tolog("Will update the output file lists since files were discovered in the job report (production job) or listed in allowNoOutput and do not exist (user job)")
new_destinationDBlockToken = []
new_destinationDblock = []
new_scopeOut = []
try:
for f in extracted_output_files:
_destinationDBlockToken, _destinationDblock, _scopeOut = getDestinationDBlockItems(f, job.outFiles, job.destinationDBlockToken, job.destinationDblock, job.scopeOut)
new_destinationDBlockToken.append(_destinationDBlockToken)
new_destinationDblock.append(_destinationDblock)
new_scopeOut.append(_scopeOut)
except Exception, e:
tolog("!!WARNING!!3434!! Exception caught: %s" % (e))
else:
# Finally replace the output file lists
job.outFiles = extracted_output_files
job.destinationDblock = new_destinationDblock
job.destinationDBlockToken = new_destinationDBlockToken
job.scopeOut = new_scopeOut
tolog("Updated: job.outFiles=%s" % str(extracted_output_files))
tolog("Updated: job.destinationDblock=%s" % str(job.destinationDblock))
tolog("Updated: job.destinationDBlockToken=%s" % str(job.destinationDBlockToken))
tolog("Updated: job.scopeOut=%s" % str(job.scopeOut))
if extracted_guids != []:
fromJSON = True
job.outFilesGuids = extracted_guids
tolog("Updated: job.outFilesGuids=%s" % str(job.outFilesGuids))
else:
tolog("Empty extracted guids list")
return job, fromJSON
def createArchives(self, output_files, zipmapString, workdir):
""" Create archives for the files in the zip map """
# The zip_map dictionary itself is also created and returned by this function
# Note that the files are not to be further compressed (already assumed to be compressed)
zip_map = None
archive_names = None
if zipmapString != "":
zip_map = job.populateZipMap(output_files, zipmapString)
# Zip the output files according to the zip map
import zipfile
cwd = os.getcwd()
os.chdir(job.workdir)
for archive in zip_map.keys():
tolog("Creating zip archive %s for files %s" % (archive, zip_map[archive]))
fname = os.path.join(workdir, archive)
zf = zipfile.ZipFile(fname, mode='w', compression=zipfile.ZIP_STORED, allowZip64=True) # zero compression
for content_file in zip_map[archive]:
try:
tolog("Adding %s to archive .." % (content_file))
zf.write(content_file)
except Exception, e:
tolog("!!WARNING!!3333!! Failed to add file %s to archive - aborting: %s" % (content_file, e))
zip_map = None
break
if zf:
zf.close()
os.chdir(cwd)
if zip_map:
archive_names = zip_map.keys()
return zip_map, archive_names
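# Minimal, self-contained illustration of the archiving approach used above: files are added
# with zipfile.ZIP_STORED (no recompression), mirroring the createArchives() logic. The file
# names are placeholders:
#
#   import zipfile
#
#   zf = zipfile.ZipFile("bundle.zip", mode="w", compression=zipfile.ZIP_STORED, allowZip64=True)
#   for name in ("out1.root", "out2.root"):
#       zf.write(name)    # stored as-is, no further compression
#   zf.close()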
def cleanupForZip(self, zip_map, archive_names, job, outs, outputFileInfo, datasetDict):
""" Remove redundant output files and update file lists """
for archive in archive_names:
# remove zipped output files from disk
file_indices = []
for filename in zip_map[archive]:
fname = os.path.join(job.workdir, filename)
try:
os.remove("%s" % (fname))
except Exception,e:
tolog("!!WARNING!!3000!! Failed to delete file %s: %s" % (fname, str(e)))
pass
# find the list index for the file (we need to remove the related file info from several lists)
if filename in job.outFiles:
# store the file index and remove the file from the outs list
file_indices.append(job.outFiles.index(filename))
outs.remove(filename)
else:
tolog("!!WARNING!!3454!! Failed to locate file %s in outFiles list" % (filename))
# remove 'filename' key from dictionaries if it exists
dummy = outputFileInfo.pop(filename, None)
dummy = datasetDict.pop(filename, None)
# now remove the file from the related lists (in reverse order, so that earlier indices remain valid while items are deleted)
for index in reversed(file_indices):
del job.outFiles[index]
del job.outFilesGuids[index]
del job.destinationDblock[index]
del job.destinationDBlockToken[index]
del job.scopeOut[index]
return job, outs, outputFileInfo
# main process starts here
if __name__ == "__main__":
# Get error handler
error = PilotErrors()
# Get runJob object
runJob = RunJob()
# Define a new parent group
os.setpgrp()
# Protect the runJob code with exception handling
hP_ret = False
try:
# always use this filename as the new jobDef module name
import newJobDef
jobSite = Site.Site()
return_tuple = runJob.argumentParser()
tolog("argumentParser returned: %s" % str(return_tuple))
jobSite.setSiteInfo(return_tuple)
# jobSite.setSiteInfo(argParser(sys.argv[1:]))
# reassign workdir for this job
jobSite.workdir = jobSite.wntmpdir
if runJob.getPilotLogFilename() != "":
pUtil.setPilotlogFilename(runJob.getPilotLogFilename())
# set node info
node = Node.Node()
node.setNodeName(os.uname()[1])
node.collectWNInfo(jobSite.workdir)
# redirect stderr
sys.stderr = open("%s/runjob.stderr" % (jobSite.workdir), "w")
tolog("Current job workdir is: %s" % os.getcwd())
tolog("Site workdir is: %s" % jobSite.workdir)
# get the experiment object
thisExperiment = getExperiment(runJob.getExperiment())
tolog("RunJob will serve experiment: %s" % (thisExperiment.getExperiment()))
# set the cache (used e.g. by LSST)
if runJob.getCache():
thisExperiment.setCache(runJob.getCache())
JR = JobRecovery()
try:
job = Job.Job()
job.workdir = jobSite.workdir
job.setJobDef(newJobDef.job)
job.workdir = jobSite.workdir
job.experiment = runJob.getExperiment()
# figure out and set payload file names
job.setPayloadName(thisExperiment.getPayloadName(job))
logGUID = newJobDef.job.get('logGUID', "")
if logGUID != "NULL" and logGUID != "":
job.tarFileGuid = logGUID
except Exception, e:
pilotErrorDiag = "Failed to process job info: %s" % str(e)
tolog("!!WARNING!!3000!! %s" % (pilotErrorDiag))
runJob.failJob(0, error.ERR_UNKNOWN, job, pilotErrorDiag=pilotErrorDiag)
# prepare for the output file data directory
# (will only be created for jobs that end up in a 'holding' state)
job.datadir = runJob.getParentWorkDir() + "/PandaJob_%s_data" % (job.jobId)
# register cleanup function
atexit.register(runJob.cleanup, job)
# define a signal handler that raises an exception, so that e.g. a SIGTERM still triggers the
# registered cleanup function; by default such a signal would terminate the process without cleanup.
def sig2exc(sig, frm):
""" signal handler """
error = PilotErrors()
runJob.setGlobalPilotErrorDiag("!!FAILED!!3000!! SIGTERM Signal %s is caught in child pid=%d!\n" % (sig, os.getpid()))
tolog(runJob.getGlobalPilotErrorDiag())
if sig == signal.SIGTERM:
runJob.setGlobalErrorCode(error.ERR_SIGTERM)
elif sig == signal.SIGQUIT:
runJob.setGlobalErrorCode(error.ERR_SIGQUIT)
elif sig == signal.SIGSEGV:
runJob.setGlobalErrorCode(error.ERR_SIGSEGV)
elif sig == signal.SIGXCPU:
runJob.setGlobalErrorCode(error.ERR_SIGXCPU)
elif sig == signal.SIGBUS:
runJob.setGlobalErrorCode(error.ERR_SIGBUS)
elif sig == signal.SIGUSR1:
runJob.setGlobalErrorCode(error.ERR_SIGUSR1)
else:
runJob.setGlobalErrorCode(error.ERR_KILLSIGNAL)
runJob.setFailureCode(runJob.getGlobalErrorCode())
# print to stderr
print >> sys.stderr, runJob.getGlobalPilotErrorDiag()
raise SystemError(sig)
signal.signal(signal.SIGTERM, sig2exc)
signal.signal(signal.SIGQUIT, sig2exc)
signal.signal(signal.SIGSEGV, sig2exc)
signal.signal(signal.SIGXCPU, sig2exc)
signal.signal(signal.SIGUSR1, sig2exc)
signal.signal(signal.SIGBUS, sig2exc)
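# The pattern above (raise from the signal handler so that registered cleanup still runs)
# can be illustrated in isolation roughly as follows; handler and cleanup names are
# placeholders, not pilot code:
#
#   import atexit, signal
#
#   def _cleanup():
#       print "cleaning up"              # runs on normal exit and after the raised exception
#   atexit.register(_cleanup)
#
#   def _handler(sig, frame):
#       raise SystemError(sig)           # turn the signal into a catchable exception
#   signal.signal(signal.SIGTERM, _handler)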
# see if it's an analysis job or not
analysisJob = job.isAnalysisJob()
if analysisJob:
tolog("User analysis job")
else:
tolog("Production job")
tolog("runJob received a job with prodSourceLabel=%s" % (job.prodSourceLabel))
# setup starts here ................................................................................
# update the job state file
job.jobState = "setup"
_retjs = JR.updateJobStateTest(job, jobSite, node, mode="test")
# send [especially] the process group back to the pilot
job.setState([job.jobState, 0, 0])
rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort())
# in case zipmaps will be used for the output files, save the zipmap string for later use and remove it from the jobPars
if "ZIP_MAP" in job.jobPars:
job.jobPars, zipmapString = job.removeZipMapString(job.jobPars)
tolog("Extracted zipmap string from jobPars: %s (removed from jobPars)" % (zipmapString))
else:
zipmapString = ""
# prepare the setup and get the run command list
ec, runCommandList, job, multi_trf = runJob.setup(job, jobSite, thisExperiment)
if ec != 0:
tolog("!!WARNING!!2999!! runJob setup failed: %s" % (job.pilotErrorDiag))
runJob.failJob(0, ec, job, pilotErrorDiag=job.pilotErrorDiag)
tolog("Setup has finished successfully")
# job has been updated, display it again
job.displayJob()
# (setup ends here) ................................................................................
tolog("Setting stage-in state until all input files have been copied")
job.setState(["stagein", 0, 0])
# send the special setup string back to the pilot (needed for the log transfer on xrdcp systems)
rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort())
# stage-in .........................................................................................
# benchmark ........................................................................................
# Launch the benchmark, let it execute during setup + stage-in
benchmark_subprocess = runJob.getBenchmarkSubprocess(node, job.coreCount, job.workdir, jobSite.sitename)
# update the job state file
job.jobState = "stagein"
_retjs = JR.updateJobStateTest(job, jobSite, node, mode="test")
# update copysetup[in] for production jobs if brokerage has decided that remote I/O should be used
if job.transferType == 'direct' or job.transferType == 'fax':
tolog('Brokerage has set transfer type to \"%s\" (remote I/O will be attempted for input files)' %\
(job.transferType))
RunJobUtilities.updateCopysetups('', transferType=job.transferType)
si = getSiteInformation(runJob.getExperiment())
si.updateDirectAccess(job.transferType)
# stage-in all input files (if necessary)
job, ins, statusPFCTurl, usedFAXandDirectIO = runJob.stageIn(job, jobSite, analysisJob)
if job.result[2] != 0:
tolog("Failing job with ec: %d" % (ec))
runJob.failJob(0, job.result[2], job, ins=ins, pilotErrorDiag=job.pilotErrorDiag)
# after stageIn, all file transfer modes are known (copy_to_scratch, file_stager, remote_io)
# consult the FileState file dictionary if cmd3 should be updated (--directIn should not be set if all
# remote_io modes have been changed to copy_to_scratch as can happen with ByteStream files)
# and update the run command list if necessary.
# in addition to the above, if FAX is used as a primary site mover and direct access is enabled, then
# the run command should not contain the --oldPrefix, --newPrefix options but use --usePFCTurl
hasInput = job.inFiles != ['']
runCommandList = RunJobUtilities.updateRunCommandList(runCommandList, runJob.getParentWorkDir(), job.jobId, statusPFCTurl, analysisJob, usedFAXandDirectIO, hasInput, job.prodDBlockToken)
# copy any present @inputFor_* files from the pilot init dir to the rundirectory (used for ES merge jobs)
#runJob.copyInputForFiles(job.workdir)
# unzip the staged in file if necessary
runJob.unzipStagedFiles(job)
# (stage-in ends here) .............................................................................
# Loop until the benchmark subprocess has finished
if benchmark_subprocess:
max_count = 6
_sleep = 15
count = 0
while benchmark_subprocess.poll() is None:
if count >= max_count:
benchmark_subprocess.send_signal(signal.SIGUSR1)
tolog("Terminated the benchmark since it ran for longer than %d s" % (max_count*_sleep))
break
else:
count += 1
# Take a short nap
tolog("Benchmark suite has not finished yet, taking a %d s nap (iteration #%d/%d)" % (_sleep, count, max_count))
time.sleep(_sleep)
# (benchmark ends here) ............................................................................
# change to running state since all input files have been staged
tolog("Changing to running state since all input files have been staged")
job.setState(["running", 0, 0])
rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort())
# update the job state file
job.jobState = "running"
_retjs = JR.updateJobStateTest(job, jobSite, node, mode="test")
# run the job(s) ...................................................................................
# set ATLAS_CONDDB if necessary, and other env vars
RunJobUtilities.setEnvVars(jobSite.sitename)
# execute the payload
res, job, getstatusoutput_was_interrupted, current_job_number = runJob.executePayload(thisExperiment, runCommandList, job)
# payload error handling
ed = ErrorDiagnosis()
job = ed.interpretPayload(job, res, getstatusoutput_was_interrupted, current_job_number, runCommandList, runJob.getFailureCode())
if job.result[1] != 0 or job.result[2] != 0:
if job.eventServiceMerge:
if runJob.corruptedFiles:
job.corruptedFiles = ','.join([e['lfn'] for e in runJob.corruptedFiles])
job.result[2] = runJob.corruptedFiles[0]['status_code']
else:
job.result[2] = PilotErrors.ERR_ESRECOVERABLE
runJob.failJob(job.result[1], job.result[2], job, pilotErrorDiag=job.pilotErrorDiag)
# stage-out ........................................................................................
# update the job state file
job.jobState = "stageout"
_retjs = JR.updateJobStateTest(job, jobSite, node, mode="test")
# are there any additional output files created by the trf/payload? if so, the output file list must be updated
job, fromJSON = runJob.handleAdditionalOutFiles(job, analysisJob)
# should any output be zipped? if so, the zipmapString was previously set (otherwise the returned variables are set to None)
# zip_map, archive_names = runJob.createArchives(job.outFiles, zipmapString, job.workdir)
zip_map = None
if zip_map:
# Add the zip archives to the output file lists
job.outFiles, job.destinationDblock, job.destinationDBlockToken, job.scopeOut = job.addArchivesToOutput(zip_map,
job.inFiles,
job.outFiles,
job.dispatchDblock,
job.destinationDblock,
job.dispatchDBlockToken,
job.destinationDBlockToken,
job.scopeIn,
job.scopeOut)
#job.outFiles, job.destinationDblock, job.destinationDBlockToken, job.scopeOut = job.addArchivesToOutput(zip_map, job.outFiles, job.destinationDblock, job.destinationDBlockToken, job.scopeOut)
# verify and prepare the output files for transfer
ec, pilotErrorDiag, outs, outsDict = RunJobUtilities.prepareOutFiles(job.outFiles, job.logFile, job.workdir)
if ec:
# missing output file (only error code from prepareOutFiles)
runJob.failJob(job.result[1], ec, job, pilotErrorDiag=pilotErrorDiag)
tolog("outs=%s"%str(outs))
tolog("outsDict=%s"%str(outsDict))
# if payload leaves the input files, delete them explicitly
if ins and not zip_map:
ec = pUtil.removeFiles(job.workdir, ins)
# update the current file states
updateFileStates(outs, runJob.getParentWorkDir(), job.jobId, mode="file_state", state="created")
dumpFileStates(runJob.getParentWorkDir(), job.jobId)
# create xml string to pass to server
outputFileInfo = {}
if outs or (job.logFile and job.logFile != ''):
# get the datasets for the output files
dsname, datasetDict = runJob.getDatasets(job, zipmap=zip_map)
tolog("datasetDict=%s"%str(datasetDict))
# re-create the metadata.xml file, putting guids of ALL output files into it.
# output files that miss guids from the job itself will get guids in PFCxml function
# first rename and copy the trf metadata file for non-build jobs
if not pUtil.isBuildJob(outs):
runJob.moveTrfMetadata(job.workdir, job.jobId)
# create the metadata for the output + log files
ec = 0
try:
ec, job, outputFileInfo = runJob.createFileMetadata(list(outs), job, outsDict, dsname, datasetDict, jobSite.sitename, analysisJob=analysisJob, fromJSON=fromJSON)
except Exception as e:
job.pilotErrorDiag = "Exception caught: %s" % e
tolog(job.pilotErrorDiag)
ec = error.ERR_BADXML
job.result[0] = "Badly formed XML (PoolFileCatalog.xml could not be parsed)"
job.result[2] = ec
if ec:
runJob.failJob(0, ec, job, pilotErrorDiag=job.pilotErrorDiag)
tolog("outputFileInfo=%s"%str(outputFileInfo))
# in case the output files have been zipped, it is now safe to remove them and update the outFiles list
# should only be executed if Archive_tf is skipped and the pilot does all zipping
if zip_map and False:
tolog('Zip map cleanup pass #1 (skipped)')
# job, outs, outputFileInfo = runJob.cleanupForZip(zip_map, archive_names, job, outs, outputFileInfo, datasetDict)
tolog('Zip map cleanup pass #2')
job.outFiles, job.destinationDblock, job.destinationDBlockToken, job.scopeOut, outs = \
job.removeInputFromOutputLists(job.inFiles, job.outFiles, job.destinationDblock, job.destinationDBlockToken, job.scopeOut, outs)
tolog('Zip map cleanup pass #3')
ec = pUtil.removeFiles(job.workdir, ins)
# move output files from workdir to local DDM area
finalUpdateDone = False
if outs:
# If clone job, make sure that stage-out should be performed
if job.cloneJob == "storeonce":
try:
message = downloadEventRanges(job.jobId, job.jobsetID, job.taskID, url=runJob.getPanDAServer())
# Create a list of event ranges from the downloaded message
event_ranges = runJob.extractEventRanges(message)
# Are there any event ranges?
if event_ranges == []:
tolog("!!WARNING!!2424!! This clone job was already executed and stored")
exitMsg = "Already executed/stored clone job"
res_tuple = (1, exitMsg)
res = (res_tuple[0], res_tuple[1], exitMsg)
job.result[0] = exitMsg
job.result[1] = 0 # transExitCode
job.result[2] = runJob.__error.ERR_EXECUTEDCLONEJOB # Pilot error code
job.pilotErrorDiag = exitMsg
runJob.failJob(0, ec, job, pilotErrorDiag=job.pilotErrorDiag)
else:
tolog("Ok to stage out clone job")
except Exception, e:
tolog("!1WARNING!!2324!! Exception caught: %s" % (e))
tolog("Setting stage-out state until all output files have been copied")
job.setState(["stageout", 0, 0])
rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort())
# Stage-out output files
ec, job, rf, latereg = runJob.stageOut(job, jobSite, outs, analysisJob, dsname, datasetDict, outputFileInfo)
# Error handling
if job.result[0] == "finished" or ec == error.ERR_PUTFUNCNOCALL:
rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort(), final=True)
else:
rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort(), final=True, latereg=latereg)
if ec == error.ERR_NOSTORAGE:
# Update the current file states for all files since nothing could be transferred
updateFileStates(outs, runJob.getParentWorkDir(), job.jobId, mode="file_state", state="not_transferred")
dumpFileStates(runJob.getParentWorkDir(), job.jobId)
finalUpdateDone = True
if ec != 0:
runJob.sysExit(job, rf)
# (Stage-out ends here) .......................................................................
job.setState(["finished", 0, 0])
if not finalUpdateDone:
rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort(), final=True)
runJob.sysExit(job)
except Exception, errorMsg:
error = PilotErrors()
if runJob.getGlobalPilotErrorDiag() != "":
pilotErrorDiag = "Exception caught in RunJob: %s" % (runJob.getGlobalPilotErrorDiag())
else:
pilotErrorDiag = "Exception caught in RunJob: %s" % str(errorMsg)
if 'format_exc' in traceback.__all__:
pilotErrorDiag += ", " + traceback.format_exc()
try:
tolog("!!FAILED!!3001!! %s" % (pilotErrorDiag))
except Exception, e:
if len(pilotErrorDiag) > 10000:
pilotErrorDiag = pilotErrorDiag[:10000]
tolog("!!FAILED!!3001!! Truncated (%s): %s" % (e, pilotErrorDiag))
else:
pilotErrorDiag = "Exception caught in runJob: %s" % (e)
tolog("!!FAILED!!3001!! %s" % (pilotErrorDiag))
# # restore the proxy if necessary
# if hP_ret:
# rP_ret = proxyguard.restoreProxy()
# if not rP_ret:
# tolog("Warning: Problems with storage can occur since proxy could not be restored")
# else:
# hP_ret = False
# tolog("ProxyGuard has finished successfully")
tolog("sys.path=%s" % str(sys.path))
cmd = "pwd;ls -lF %s;ls -lF;ls -lF .." % (runJob.getPilotInitDir())
tolog("Executing command: %s" % (cmd))
out = commands.getoutput(cmd)
tolog("%s" % (out))
job = Job.Job()
job.setJobDef(newJobDef.job)
job.pilotErrorDiag = pilotErrorDiag
job.result[0] = "failed"
if runJob.getGlobalErrorCode() != 0:
job.result[2] = runJob.getGlobalErrorCode()
else:
job.result[2] = error.ERR_RUNJOBEXC
tolog("Failing job with error code: %d" % (job.result[2]))
# fail the job without calling sysExit/cleanup (will be called anyway)
runJob.failJob(0, job.result[2], job, pilotErrorDiag=pilotErrorDiag, docleanup=False)
# end of runJob
| apache-2.0 |
vinodkc/spark | python/pyspark/pandas/datetimes.py | 15 | 26546 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Date/Time related functions on pandas-on-Spark Series
"""
from typing import Any, Optional, Union, TYPE_CHECKING, no_type_check
import numpy as np # noqa: F401 (SPARK-34943)
import pandas as pd # noqa: F401
from pandas.tseries.offsets import DateOffset
import pyspark.sql.functions as F
from pyspark.sql.types import DateType, TimestampType, LongType
if TYPE_CHECKING:
import pyspark.pandas as ps # noqa: F401 (SPARK-34943)
class DatetimeMethods(object):
"""Date/Time methods for pandas-on-Spark Series"""
def __init__(self, series: "ps.Series"):
if not isinstance(series.spark.data_type, (DateType, TimestampType)):
raise ValueError(
"Cannot call DatetimeMethods on type {}".format(series.spark.data_type)
)
self._data = series
# Properties
@property
def date(self) -> "ps.Series":
"""
Returns a Series of python datetime.date objects (namely, the date
part of Timestamps without timezone information).
"""
# TODO: Hit a weird exception
# syntax error in attribute name: `to_date(`start_date`)` with alias
return self._data.spark.transform(F.to_date)
@property
def time(self) -> "ps.Series":
raise NotImplementedError()
@property
def timetz(self) -> "ps.Series":
raise NotImplementedError()
@property
def year(self) -> "ps.Series":
"""
The year of the datetime.
"""
return self._data.spark.transform(lambda c: F.year(c).cast(LongType()))
@property
def month(self) -> "ps.Series":
"""
The month of the timestamp as January = 1, December = 12.
"""
return self._data.spark.transform(lambda c: F.month(c).cast(LongType()))
@property
def day(self) -> "ps.Series":
"""
The days of the datetime.
"""
return self._data.spark.transform(lambda c: F.dayofmonth(c).cast(LongType()))
@property
def hour(self) -> "ps.Series":
"""
The hours of the datetime.
"""
return self._data.spark.transform(lambda c: F.hour(c).cast(LongType()))
@property
def minute(self) -> "ps.Series":
"""
The minutes of the datetime.
"""
return self._data.spark.transform(lambda c: F.minute(c).cast(LongType()))
@property
def second(self) -> "ps.Series":
"""
The seconds of the datetime.
"""
return self._data.spark.transform(lambda c: F.second(c).cast(LongType()))
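# Rough usage sketch for the component properties above (year/month/day/hour/minute/second);
# the expected values follow ordinary pandas datetime semantics:
#
#   s = ps.Series(pd.date_range("2021-03-15 10:30:45", periods=1))
#   s.dt.year      # -> 2021
#   s.dt.month     # -> 3
#   s.dt.second    # -> 45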
@property
def microsecond(self) -> "ps.Series":
"""
The microseconds of the datetime.
"""
@no_type_check
def pandas_microsecond(s) -> "ps.Series[np.int64]":
return s.dt.microsecond
return self._data.pandas_on_spark.transform_batch(pandas_microsecond)
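# Note on the pattern used throughout this class: pandas_on_spark.transform_batch() applies
# the wrapped function to each batch of the data as a plain pandas Series, and the return-type
# annotation on the inner function (e.g. "ps.Series[np.int64]") is what tells pandas-on-Spark
# the schema of the resulting column, which is why the ordinary pandas ``.dt`` accessors can
# be reused here.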
@property
def nanosecond(self) -> "ps.Series":
raise NotImplementedError()
@property
def week(self) -> "ps.Series":
"""
The week ordinal of the year.
"""
return self._data.spark.transform(lambda c: F.weekofyear(c).cast(LongType()))
@property
def weekofyear(self) -> "ps.Series":
return self.week
weekofyear.__doc__ = week.__doc__
@property
def dayofweek(self) -> "ps.Series":
"""
The day of the week with Monday=0, Sunday=6.
Return the day of the week. It is assumed the week starts on
Monday, which is denoted by 0 and ends on Sunday which is denoted
by 6. This method is available on Series with datetime
values (using the `dt` accessor).
Returns
-------
Series
Containing integers indicating the day number.
See Also
--------
Series.dt.dayofweek : Alias.
Series.dt.weekday : Alias.
Series.dt.day_name : Returns the name of the day of the week.
Examples
--------
>>> s = ps.from_pandas(pd.date_range('2016-12-31', '2017-01-08', freq='D').to_series())
>>> s.dt.dayofweek
2016-12-31 5
2017-01-01 6
2017-01-02 0
2017-01-03 1
2017-01-04 2
2017-01-05 3
2017-01-06 4
2017-01-07 5
2017-01-08 6
dtype: int64
"""
@no_type_check
def pandas_dayofweek(s) -> "ps.Series[np.int64]":
return s.dt.dayofweek
return self._data.pandas_on_spark.transform_batch(pandas_dayofweek)
@property
def weekday(self) -> "ps.Series":
return self.dayofweek
weekday.__doc__ = dayofweek.__doc__
@property
def dayofyear(self) -> "ps.Series":
"""
The ordinal day of the year.
"""
@no_type_check
def pandas_dayofyear(s) -> "ps.Series[np.int64]":
return s.dt.dayofyear
return self._data.pandas_on_spark.transform_batch(pandas_dayofyear)
@property
def quarter(self) -> "ps.Series":
"""
The quarter of the date.
"""
@no_type_check
def pandas_quarter(s) -> "ps.Series[np.int64]":
return s.dt.quarter
return self._data.pandas_on_spark.transform_batch(pandas_quarter)
@property
def is_month_start(self) -> "ps.Series":
"""
Indicates whether the date is the first day of the month.
Returns
-------
Series
For Series, returns a Series with boolean values.
See Also
--------
is_month_end : Return a boolean indicating whether the date
is the last day of the month.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor.
>>> s = ps.Series(pd.date_range("2018-02-27", periods=3))
>>> s
0 2018-02-27
1 2018-02-28
2 2018-03-01
dtype: datetime64[ns]
>>> s.dt.is_month_start
0 False
1 False
2 True
dtype: bool
"""
@no_type_check
def pandas_is_month_start(s) -> "ps.Series[bool]":
return s.dt.is_month_start
return self._data.pandas_on_spark.transform_batch(pandas_is_month_start)
@property
def is_month_end(self) -> "ps.Series":
"""
Indicates whether the date is the last day of the month.
Returns
-------
Series
For Series, returns a Series with boolean values.
See Also
--------
is_month_start : Return a boolean indicating whether the date
is the first day of the month.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor.
>>> s = ps.Series(pd.date_range("2018-02-27", periods=3))
>>> s
0 2018-02-27
1 2018-02-28
2 2018-03-01
dtype: datetime64[ns]
>>> s.dt.is_month_end
0 False
1 True
2 False
dtype: bool
"""
@no_type_check
def pandas_is_month_end(s) -> "ps.Series[bool]":
return s.dt.is_month_end
return self._data.pandas_on_spark.transform_batch(pandas_is_month_end)
@property
def is_quarter_start(self) -> "ps.Series":
"""
Indicator for whether the date is the first day of a quarter.
Returns
-------
is_quarter_start : Series
The same type as the original data with boolean values. Series will
have the same name and index.
See Also
--------
quarter : Return the quarter of the date.
is_quarter_end : Similar property for indicating the quarter end.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor.
>>> df = ps.DataFrame({'dates': pd.date_range("2017-03-30",
... periods=4)})
>>> df
dates
0 2017-03-30
1 2017-03-31
2 2017-04-01
3 2017-04-02
>>> df.dates.dt.quarter
0 1
1 1
2 2
3 2
Name: dates, dtype: int64
>>> df.dates.dt.is_quarter_start
0 False
1 False
2 True
3 False
Name: dates, dtype: bool
"""
@no_type_check
def pandas_is_quarter_start(s) -> "ps.Series[bool]":
return s.dt.is_quarter_start
return self._data.pandas_on_spark.transform_batch(pandas_is_quarter_start)
@property
def is_quarter_end(self) -> "ps.Series":
"""
Indicator for whether the date is the last day of a quarter.
Returns
-------
is_quarter_end : Series
The same type as the original data with boolean values. Series will
have the same name and index.
See Also
--------
quarter : Return the quarter of the date.
is_quarter_start : Similar property indicating the quarter start.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor.
>>> df = ps.DataFrame({'dates': pd.date_range("2017-03-30",
... periods=4)})
>>> df
dates
0 2017-03-30
1 2017-03-31
2 2017-04-01
3 2017-04-02
>>> df.dates.dt.quarter
0 1
1 1
2 2
3 2
Name: dates, dtype: int64
>>> df.dates.dt.is_quarter_end
0 False
1 True
2 False
3 False
Name: dates, dtype: bool
"""
@no_type_check
def pandas_is_quarter_end(s) -> "ps.Series[bool]":
return s.dt.is_quarter_end
return self._data.pandas_on_spark.transform_batch(pandas_is_quarter_end)
@property
def is_year_start(self) -> "ps.Series":
"""
Indicate whether the date is the first day of a year.
Returns
-------
Series
The same type as the original data with boolean values. Series will
have the same name and index.
See Also
--------
is_year_end : Similar property indicating the last day of the year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor.
>>> dates = ps.Series(pd.date_range("2017-12-30", periods=3))
>>> dates
0 2017-12-30
1 2017-12-31
2 2018-01-01
dtype: datetime64[ns]
>>> dates.dt.is_year_start
0 False
1 False
2 True
dtype: bool
"""
@no_type_check
def pandas_is_year_start(s) -> "ps.Series[bool]":
return s.dt.is_year_start
return self._data.pandas_on_spark.transform_batch(pandas_is_year_start)
@property
def is_year_end(self) -> "ps.Series":
"""
Indicate whether the date is the last day of the year.
Returns
-------
Series
The same type as the original data with boolean values. Series will
have the same name and index.
See Also
--------
is_year_start : Similar property indicating the start of the year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor.
>>> dates = ps.Series(pd.date_range("2017-12-30", periods=3))
>>> dates
0 2017-12-30
1 2017-12-31
2 2018-01-01
dtype: datetime64[ns]
>>> dates.dt.is_year_end
0 False
1 True
2 False
dtype: bool
"""
@no_type_check
def pandas_is_year_end(s) -> "ps.Series[bool]":
return s.dt.is_year_end
return self._data.pandas_on_spark.transform_batch(pandas_is_year_end)
@property
def is_leap_year(self) -> "ps.Series":
"""
Boolean indicator if the date belongs to a leap year.
A leap year is a year, which has 366 days (instead of 365) including
29th of February as an intercalary day.
Leap years are years which are multiples of four with the exception
of years divisible by 100 but not by 400.
Returns
-------
Series
Booleans indicating if dates belong to a leap year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor.
>>> dates_series = ps.Series(pd.date_range("2012-01-01", "2015-01-01", freq="Y"))
>>> dates_series
0 2012-12-31
1 2013-12-31
2 2014-12-31
dtype: datetime64[ns]
>>> dates_series.dt.is_leap_year
0 True
1 False
2 False
dtype: bool
"""
@no_type_check
def pandas_is_leap_year(s) -> "ps.Series[bool]":
return s.dt.is_leap_year
return self._data.pandas_on_spark.transform_batch(pandas_is_leap_year)
@property
def daysinmonth(self) -> "ps.Series":
"""
The number of days in the month.
"""
@no_type_check
def pandas_daysinmonth(s) -> "ps.Series[np.int64]":
return s.dt.daysinmonth
return self._data.pandas_on_spark.transform_batch(pandas_daysinmonth)
@property
def days_in_month(self) -> "ps.Series":
return self.daysinmonth
days_in_month.__doc__ = daysinmonth.__doc__
# Methods
@no_type_check
def tz_localize(self, tz) -> "ps.Series":
"""
Localize tz-naive Datetime column to tz-aware Datetime column.
"""
# Neither tz-naive or tz-aware datetime exists in Spark
raise NotImplementedError()
@no_type_check
def tz_convert(self, tz) -> "ps.Series":
"""
Convert tz-aware Datetime column from one time zone to another.
"""
# tz-aware datetime doesn't exist in Spark
raise NotImplementedError()
def normalize(self) -> "ps.Series":
"""
Convert times to midnight.
The time component of the date-time is converted to midnight i.e.
00:00:00. This is useful in cases when the time does not matter.
Length is unaltered. The timezones are unaffected.
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on Datetime Array.
Returns
-------
Series
The same type as the original data. Series will have the same
name and index.
See Also
--------
floor : Floor the series to the specified freq.
ceil : Ceil the series to the specified freq.
round : Round the series to the specified freq.
Examples
--------
>>> series = ps.Series(pd.Series(pd.date_range('2012-1-1 12:45:31', periods=3, freq='M')))
>>> series.dt.normalize()
0 2012-01-31
1 2012-02-29
2 2012-03-31
dtype: datetime64[ns]
"""
@no_type_check
def pandas_normalize(s) -> "ps.Series[np.datetime64]":
return s.dt.normalize()
return self._data.pandas_on_spark.transform_batch(pandas_normalize)
def strftime(self, date_format: str) -> "ps.Series":
"""
Convert to a string Series using specified date_format.
Return a Series of formatted strings specified by date_format, which
supports the same string format as the python standard library. Details
of the string format can be found in python string format
doc.
Parameters
----------
date_format : str
Date format string (example: "%%Y-%%m-%%d").
Returns
-------
Series
Series of formatted strings.
See Also
--------
to_datetime : Convert the given argument to datetime.
normalize : Return series with times to midnight.
round : Round the series to the specified freq.
floor : Floor the series to the specified freq.
Examples
--------
>>> series = ps.Series(pd.date_range(pd.Timestamp("2018-03-10 09:00"),
... periods=3, freq='s'))
>>> series
0 2018-03-10 09:00:00
1 2018-03-10 09:00:01
2 2018-03-10 09:00:02
dtype: datetime64[ns]
>>> series.dt.strftime('%B %d, %Y, %r')
0 March 10, 2018, 09:00:00 AM
1 March 10, 2018, 09:00:01 AM
2 March 10, 2018, 09:00:02 AM
dtype: object
"""
@no_type_check
def pandas_strftime(s) -> "ps.Series[str]":
return s.dt.strftime(date_format)
return self._data.pandas_on_spark.transform_batch(pandas_strftime)
def round(self, freq: Union[str, DateOffset], *args: Any, **kwargs: Any) -> "ps.Series":
"""
Perform round operation on the data to the specified freq.
Parameters
----------
freq : str or Offset
The frequency level to round the index to. Must be a fixed
frequency like 'S' (second) not 'ME' (month end).
nonexistent : 'shift_forward', 'shift_backward', 'NaT', timedelta, default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise a NonExistentTimeError if there are
nonexistent times
.. note:: this option only works with pandas 0.24.0+
Returns
-------
Series
a Series with the same index for a Series.
Raises
------
ValueError if the `freq` cannot be converted.
Examples
--------
>>> series = ps.Series(pd.date_range('1/1/2018 11:59:00', periods=3, freq='min'))
>>> series
0 2018-01-01 11:59:00
1 2018-01-01 12:00:00
2 2018-01-01 12:01:00
dtype: datetime64[ns]
>>> series.dt.round("H")
0 2018-01-01 12:00:00
1 2018-01-01 12:00:00
2 2018-01-01 12:00:00
dtype: datetime64[ns]
"""
@no_type_check
def pandas_round(s) -> "ps.Series[np.datetime64]":
return s.dt.round(freq, *args, **kwargs)
return self._data.pandas_on_spark.transform_batch(pandas_round)
def floor(self, freq: Union[str, DateOffset], *args: Any, **kwargs: Any) -> "ps.Series":
"""
Perform floor operation on the data to the specified freq.
Parameters
----------
freq : str or Offset
The frequency level to floor the index to. Must be a fixed
frequency like 'S' (second) not 'ME' (month end).
nonexistent : 'shift_forward', 'shift_backward', 'NaT', timedelta, default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise a NonExistentTimeError if there are
nonexistent times
.. note:: this option only works with pandas 0.24.0+
Returns
-------
Series
a Series with the same index, with values floored to the specified freq.
Raises
------
ValueError if the `freq` cannot be converted.
Examples
--------
>>> series = ps.Series(pd.date_range('1/1/2018 11:59:00', periods=3, freq='min'))
>>> series
0 2018-01-01 11:59:00
1 2018-01-01 12:00:00
2 2018-01-01 12:01:00
dtype: datetime64[ns]
>>> series.dt.floor("H")
0 2018-01-01 11:00:00
1 2018-01-01 12:00:00
2 2018-01-01 12:00:00
dtype: datetime64[ns]
"""
@no_type_check
def pandas_floor(s) -> "ps.Series[np.datetime64]":
return s.dt.floor(freq, *args, **kwargs)
return self._data.pandas_on_spark.transform_batch(pandas_floor)
def ceil(self, freq: Union[str, DateOffset], *args: Any, **kwargs: Any) -> "ps.Series":
"""
Perform ceil operation on the data to the specified freq.
Parameters
----------
freq : str or Offset
The frequency level to ceil the index to. Must be a fixed
frequency like 'S' (second) not 'ME' (month end).
nonexistent : 'shift_forward', 'shift_backward', 'NaT', timedelta, default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise a NonExistentTimeError if there are
nonexistent times
.. note:: this option only works with pandas 0.24.0+
Returns
-------
Series
a Series with the same index, with values ceiled to the specified freq.
Raises
------
ValueError if the `freq` cannot be converted.
Examples
--------
>>> series = ps.Series(pd.date_range('1/1/2018 11:59:00', periods=3, freq='min'))
>>> series
0 2018-01-01 11:59:00
1 2018-01-01 12:00:00
2 2018-01-01 12:01:00
dtype: datetime64[ns]
>>> series.dt.ceil("H")
0 2018-01-01 12:00:00
1 2018-01-01 12:00:00
2 2018-01-01 13:00:00
dtype: datetime64[ns]
"""
@no_type_check
def pandas_ceil(s) -> "ps.Series[np.datetime64]":
return s.dt.ceil(freq, *args, **kwargs)
return self._data.pandas_on_spark.transform_batch(pandas_ceil)
def month_name(self, locale: Optional[str] = None) -> "ps.Series":
"""
Return the month names of the series with specified locale.
Parameters
----------
locale : str, optional
Locale determining the language in which to return the month name.
Default is English locale.
Returns
-------
Series
Series of month names.
Examples
--------
>>> series = ps.Series(pd.date_range(start='2018-01', freq='M', periods=3))
>>> series
0 2018-01-31
1 2018-02-28
2 2018-03-31
dtype: datetime64[ns]
>>> series.dt.month_name()
0 January
1 February
2 March
dtype: object
"""
@no_type_check
def pandas_month_name(s) -> "ps.Series[str]":
return s.dt.month_name(locale=locale)
return self._data.pandas_on_spark.transform_batch(pandas_month_name)
def day_name(self, locale: Optional[str] = None) -> "ps.Series":
"""
Return the day names of the series with specified locale.
Parameters
----------
locale : str, optional
Locale determining the language in which to return the day name.
Default is English locale.
Returns
-------
Series
Series of day names.
Examples
--------
>>> series = ps.Series(pd.date_range(start='2018-01-01', freq='D', periods=3))
>>> series
0 2018-01-01
1 2018-01-02
2 2018-01-03
dtype: datetime64[ns]
>>> series.dt.day_name()
0 Monday
1 Tuesday
2 Wednesday
dtype: object
"""
@no_type_check
def pandas_day_name(s) -> "ps.Series[str]":
return s.dt.day_name(locale=locale)
return self._data.pandas_on_spark.transform_batch(pandas_day_name)
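# A minimal usage sketch (an assumption, not part of the original module): it mirrors the
# doctest examples above and exercises the accessor methods, each of which delegates to
# pandas through transform_batch. It assumes pyspark.pandas can start or reuse a SparkSession.
def _example_datetime_accessor_usage():
    import pandas as pd
    import pyspark.pandas as ps

    psser = ps.Series(pd.date_range('1/1/2018 11:59:00', periods=3, freq='min'))
    rounded = psser.dt.round('H')   # to the nearest hour
    floored = psser.dt.floor('H')   # down to the hour
    ceiled = psser.dt.ceil('H')     # up to the hour
    names = psser.dt.day_name()     # e.g. 'Monday'
    return rounded, floored, ceiled, names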
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.datetimes
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.datetimes.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.datetimes tests")
.getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.datetimes,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
pjryan126/solid-start-careers | store/api/zillow/venv/lib/python2.7/site-packages/pandas/io/tests/test_pytables.py | 1 | 202104 | import nose
import sys
import os
import warnings
import tempfile
from contextlib import contextmanager
import datetime
import numpy as np
import pandas
import pandas as pd
from pandas import (Series, DataFrame, Panel, MultiIndex, Int64Index,
RangeIndex, Categorical, bdate_range,
date_range, timedelta_range, Index, DatetimeIndex,
isnull)
from pandas.compat import is_platform_windows, PY3, PY35
from pandas.io.pytables import _tables, TableIterator
try:
_tables()
except ImportError as e:
raise nose.SkipTest(e)
from pandas.io.pytables import (HDFStore, get_store, Term, read_hdf,
IncompatibilityWarning, PerformanceWarning,
AttributeConflictWarning, DuplicateWarning,
PossibleDataLossError, ClosedFileError)
from pandas.io import pytables as pytables
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.util.testing import (assert_panel4d_equal,
assert_panel_equal,
assert_frame_equal,
assert_series_equal,
assert_produces_warning)
from pandas import concat, Timestamp
from pandas import compat
from pandas.compat import range, lrange, u
try:
import tables
except ImportError:
raise nose.SkipTest('no pytables')
from distutils.version import LooseVersion
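# Choose 'blosc' as the default compressor when the installed PyTables is new enough
# to support it (>= 2.2); otherwise fall back to 'zlib'. The `and ... or ...` chain
# below is the old pre-ternary conditional idiom.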
_default_compressor = LooseVersion(tables.__version__) >= '2.2' \
and 'blosc' or 'zlib'
_multiprocess_can_split_ = False
# testing on windows/py3 seems to fault
# when using compression
skip_compression = PY3 and is_platform_windows()
# contextmanager to ensure the file cleanup
def safe_remove(path):
if path is not None:
try:
os.remove(path)
except:
pass
def safe_close(store):
try:
if store is not None:
store.close()
except:
pass
def create_tempfile(path):
""" create an unopened named temporary file """
return os.path.join(tempfile.gettempdir(), path)
@contextmanager
def ensure_clean_store(path, mode='a', complevel=None, complib=None,
fletcher32=False):
try:
# put in the temporary path if we don't have one already
if not len(os.path.dirname(path)):
path = create_tempfile(path)
store = HDFStore(path, mode=mode, complevel=complevel,
complib=complib, fletcher32=False)
yield store
finally:
safe_close(store)
if mode == 'w' or mode == 'a':
safe_remove(path)
@contextmanager
def ensure_clean_path(path):
"""
return essentially a named temporary file that is not opened
and is deleted on exiting; if path is a list, then create and
return a list of filenames
"""
try:
if isinstance(path, list):
filenames = [create_tempfile(p) for p in path]
yield filenames
else:
filenames = [create_tempfile(path)]
yield filenames[0]
finally:
for f in filenames:
safe_remove(f)
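# A hypothetical usage sketch (not part of the original suite) showing how the two
# context managers above are typically combined with to_hdf/read_hdf and HDFStore
# round-trips; it is defined here but never called.
def _example_clean_helpers_usage():
    df = tm.makeDataFrame()
    # ensure_clean_path yields a temp file path and removes the file afterwards
    with ensure_clean_path('usage_example.h5') as path:
        df.to_hdf(path, 'df', format='table')
        assert_frame_equal(read_hdf(path, 'df'), df)
    # ensure_clean_store yields an open HDFStore and cleans it up afterwards
    with ensure_clean_store('usage_example_store.h5') as store:
        store.append('df', df)
        assert_frame_equal(store.select('df'), df)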
# set these parameters so we don't have file sharing
tables.parameters.MAX_NUMEXPR_THREADS = 1
tables.parameters.MAX_BLOSC_THREADS = 1
tables.parameters.MAX_THREADS = 1
def _maybe_remove(store, key):
"""For tests using tables, try removing the table to be sure there is
no content from previous tests using the same table name."""
try:
store.remove(key)
except:
pass
def compat_assert_produces_warning(w, f):
""" don't produce a warning under PY3 """
if compat.PY3:
f()
else:
with tm.assert_produces_warning(expected_warning=w):
f()
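# Illustrative only (names are placeholders, not objects defined at module scope):
# compat_assert_produces_warning(PerformanceWarning,
#                                lambda: store.put('df_mixed', df_mixed))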
class Base(tm.TestCase):
@classmethod
def setUpClass(cls):
super(Base, cls).setUpClass()
# Pytables 3.0.0 deprecates lots of things
tm.reset_testing_mode()
@classmethod
def tearDownClass(cls):
super(Base, cls).tearDownClass()
# Pytables 3.0.0 deprecates lots of things
tm.set_testing_mode()
def setUp(self):
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.path = 'tmp.__%s__.h5' % tm.rands(10)
def tearDown(self):
pass
class TestHDFStore(Base, tm.TestCase):
def test_factory_fun(self):
path = create_tempfile(self.path)
try:
with get_store(path) as tbl:
raise ValueError('blah')
except ValueError:
pass
finally:
safe_remove(path)
try:
with get_store(path) as tbl:
tbl['a'] = tm.makeDataFrame()
with get_store(path) as tbl:
self.assertEqual(len(tbl), 1)
self.assertEqual(type(tbl['a']), DataFrame)
finally:
safe_remove(self.path)
def test_context(self):
path = create_tempfile(self.path)
try:
with HDFStore(path) as tbl:
raise ValueError('blah')
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl['a'] = tm.makeDataFrame()
with HDFStore(path) as tbl:
self.assertEqual(len(tbl), 1)
self.assertEqual(type(tbl['a']), DataFrame)
finally:
safe_remove(path)
def test_conv_read_write(self):
path = create_tempfile(self.path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
assert_series_equal(o, roundtrip('series', o))
o = tm.makeStringSeries()
assert_series_equal(o, roundtrip('string_series', o))
o = tm.makeDataFrame()
assert_frame_equal(o, roundtrip('frame', o))
o = tm.makePanel()
assert_panel_equal(o, roundtrip('panel', o))
# table
df = DataFrame(dict(A=lrange(5), B=lrange(5)))
df.to_hdf(path, 'table', append=True)
result = read_hdf(path, 'table', where=['index>2'])
assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self):
# GH6166
# unconversion of long strings was being chopped in earlier
# versions of numpy < 1.7.2
df = DataFrame({'a': tm.rands_array(100, size=10)},
index=tm.rands_array(100, size=10))
with ensure_clean_store(self.path) as store:
store.append('df', df, data_columns=['a'])
result = store.select('df')
assert_frame_equal(df, result)
def test_api(self):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, 'df', append=True, format='table')
df.iloc[10:].to_hdf(path, 'df', append=True, format='table')
assert_frame_equal(read_hdf(path, 'df'), df)
# append to False
df.iloc[:10].to_hdf(path, 'df', append=False, format='table')
df.iloc[10:].to_hdf(path, 'df', append=True, format='table')
assert_frame_equal(read_hdf(path, 'df'), df)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, 'df', append=True)
df.iloc[10:].to_hdf(path, 'df', append=True, format='table')
assert_frame_equal(read_hdf(path, 'df'), df)
# append to False
df.iloc[:10].to_hdf(path, 'df', append=False, format='table')
df.iloc[10:].to_hdf(path, 'df', append=True)
assert_frame_equal(read_hdf(path, 'df'), df)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, 'df', append=False, format='fixed')
assert_frame_equal(read_hdf(path, 'df'), df)
df.to_hdf(path, 'df', append=False, format='f')
assert_frame_equal(read_hdf(path, 'df'), df)
df.to_hdf(path, 'df', append=False)
assert_frame_equal(read_hdf(path, 'df'), df)
df.to_hdf(path, 'df')
assert_frame_equal(read_hdf(path, 'df'), df)
with ensure_clean_store(self.path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, 'df')
store.append('df', df.iloc[:10], append=True, format='table')
store.append('df', df.iloc[10:], append=True, format='table')
assert_frame_equal(store.select('df'), df)
# append to False
_maybe_remove(store, 'df')
store.append('df', df.iloc[:10], append=False, format='table')
store.append('df', df.iloc[10:], append=True, format='table')
assert_frame_equal(store.select('df'), df)
# formats
_maybe_remove(store, 'df')
store.append('df', df.iloc[:10], append=False, format='table')
store.append('df', df.iloc[10:], append=True, format='table')
assert_frame_equal(store.select('df'), df)
_maybe_remove(store, 'df')
store.append('df', df.iloc[:10], append=False, format='table')
store.append('df', df.iloc[10:], append=True, format=None)
assert_frame_equal(store.select('df'), df)
with ensure_clean_path(self.path) as path:
# invalid
df = tm.makeDataFrame()
self.assertRaises(ValueError, df.to_hdf, path,
'df', append=True, format='f')
self.assertRaises(ValueError, df.to_hdf, path,
'df', append=True, format='fixed')
self.assertRaises(TypeError, df.to_hdf, path,
'df', append=True, format='foo')
self.assertRaises(TypeError, df.to_hdf, path,
'df', append=False, format='bar')
# File path doesn't exist
path = ""
self.assertRaises(IOError, read_hdf, path, 'df')
def test_api_default_format(self):
# default_format option
with ensure_clean_store(self.path) as store:
df = tm.makeDataFrame()
pandas.set_option('io.hdf.default_format', 'fixed')
_maybe_remove(store, 'df')
store.put('df', df)
self.assertFalse(store.get_storer('df').is_table)
self.assertRaises(ValueError, store.append, 'df2', df)
pandas.set_option('io.hdf.default_format', 'table')
_maybe_remove(store, 'df')
store.put('df', df)
self.assertTrue(store.get_storer('df').is_table)
_maybe_remove(store, 'df2')
store.append('df2', df)
self.assertTrue(store.get_storer('df').is_table)
pandas.set_option('io.hdf.default_format', None)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
pandas.set_option('io.hdf.default_format', 'fixed')
df.to_hdf(path, 'df')
with get_store(path) as store:
self.assertFalse(store.get_storer('df').is_table)
self.assertRaises(ValueError, df.to_hdf, path, 'df2', append=True)
pandas.set_option('io.hdf.default_format', 'table')
df.to_hdf(path, 'df3')
with HDFStore(path) as store:
self.assertTrue(store.get_storer('df3').is_table)
df.to_hdf(path, 'df4', append=True)
with HDFStore(path) as store:
self.assertTrue(store.get_storer('df4').is_table)
pandas.set_option('io.hdf.default_format', None)
def test_keys(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeStringSeries()
store['c'] = tm.makeDataFrame()
store['d'] = tm.makePanel()
store['foo/bar'] = tm.makePanel()
self.assertEqual(len(store), 5)
expected = set(['/a', '/b', '/c', '/d', '/foo/bar'])
self.assertTrue(set(store.keys()) == expected)
self.assertTrue(set(store) == expected)
def test_iter_empty(self):
with ensure_clean_store(self.path) as store:
# GH 12221
self.assertTrue(list(store) == [])
def test_repr(self):
with ensure_clean_store(self.path) as store:
repr(store)
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeStringSeries()
store['c'] = tm.makeDataFrame()
store['d'] = tm.makePanel()
store['foo/bar'] = tm.makePanel()
store.append('e', tm.makePanel())
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
df['timestamp1'] = Timestamp('20010102')
df['timestamp2'] = Timestamp('20010103')
df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
df.ix[3:6, ['obj1']] = np.nan
df = df.consolidate()._convert(datetime=True)
warnings.filterwarnings('ignore', category=PerformanceWarning)
store['df'] = df
warnings.filterwarnings('always', category=PerformanceWarning)
# make a random group in hdf space
store._handle.create_group(store._handle.root, 'bah')
repr(store)
str(store)
# storers
with ensure_clean_store(self.path) as store:
df = tm.makeDataFrame()
store.append('df', df)
s = store.get_storer('df')
repr(s)
str(s)
def test_contains(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeDataFrame()
store['foo/bar'] = tm.makeDataFrame()
self.assertIn('a', store)
self.assertIn('b', store)
self.assertNotIn('c', store)
self.assertIn('foo/bar', store)
self.assertIn('/foo/bar', store)
self.assertNotIn('/foo/b', store)
self.assertNotIn('bar', store)
# GH 2694
warnings.filterwarnings(
'ignore', category=tables.NaturalNameWarning)
store['node())'] = tm.makeDataFrame()
self.assertIn('node())', store)
def test_versioning(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
store.append('df1', df[:10])
store.append('df1', df[10:])
self.assertEqual(store.root.a._v_attrs.pandas_version, '0.15.2')
self.assertEqual(store.root.b._v_attrs.pandas_version, '0.15.2')
self.assertEqual(store.root.df1._v_attrs.pandas_version, '0.15.2')
# write a file and wipe its versioning
_maybe_remove(store, 'df2')
store.append('df2', df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node('df2')._v_attrs.pandas_version = None
self.assertRaises(Exception, store.select, 'df2')
def test_mode(self):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(self.path) as path:
# constructor
if mode in ['r', 'r+']:
self.assertRaises(IOError, HDFStore, path, mode=mode)
else:
store = HDFStore(path, mode=mode)
self.assertEqual(store._handle.mode, mode)
store.close()
with ensure_clean_path(self.path) as path:
# context
if mode in ['r', 'r+']:
def f():
with HDFStore(path, mode=mode) as store: # noqa
pass
self.assertRaises(IOError, f)
else:
with HDFStore(path, mode=mode) as store:
self.assertEqual(store._handle.mode, mode)
with ensure_clean_path(self.path) as path:
# conv write
if mode in ['r', 'r+']:
self.assertRaises(IOError, df.to_hdf,
path, 'df', mode=mode)
df.to_hdf(path, 'df', mode='w')
else:
df.to_hdf(path, 'df', mode=mode)
# conv read
if mode in ['w']:
self.assertRaises(KeyError, read_hdf,
path, 'df', mode=mode)
else:
result = read_hdf(path, 'df', mode=mode)
assert_frame_equal(result, df)
check('r')
check('r+')
check('a')
check('w')
def test_reopen_handle(self):
with ensure_clean_path(self.path) as path:
store = HDFStore(path, mode='a')
store['a'] = tm.makeTimeSeries()
# invalid mode change
self.assertRaises(PossibleDataLossError, store.open, 'w')
store.close()
self.assertFalse(store.is_open)
# truncation ok here
store.open('w')
self.assertTrue(store.is_open)
self.assertEqual(len(store), 0)
store.close()
self.assertFalse(store.is_open)
store = HDFStore(path, mode='a')
store['a'] = tm.makeTimeSeries()
# reopen as read
store.open('r')
self.assertTrue(store.is_open)
self.assertEqual(len(store), 1)
self.assertEqual(store._mode, 'r')
store.close()
self.assertFalse(store.is_open)
# reopen as append
store.open('a')
self.assertTrue(store.is_open)
self.assertEqual(len(store), 1)
self.assertEqual(store._mode, 'a')
store.close()
self.assertFalse(store.is_open)
# reopen as append (again)
store.open('a')
self.assertTrue(store.is_open)
self.assertEqual(len(store), 1)
self.assertEqual(store._mode, 'a')
store.close()
self.assertFalse(store.is_open)
def test_open_args(self):
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(path, mode='a', driver='H5FD_CORE',
driver_core_backing_store=0)
store['df'] = df
store.append('df2', df)
tm.assert_frame_equal(store['df'], df)
tm.assert_frame_equal(store['df2'], df)
store.close()
# the file should not have actually been written
self.assertFalse(os.path.exists(path))
def test_flush(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
left = store.get('a')
right = store['a']
tm.assert_series_equal(left, right)
left = store.get('/a')
right = store['/a']
tm.assert_series_equal(left, right)
self.assertRaises(KeyError, store.get, 'b')
def test_getattr(self):
with ensure_clean_store(self.path) as store:
s = tm.makeTimeSeries()
store['a'] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, 'a')
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store['df'] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
self.assertRaises(AttributeError, getattr, store, 'd')
for x in ['mode', 'path', 'handle', 'complib']:
self.assertRaises(AttributeError, getattr, store, x)
# not stores
for x in ['mode', 'path', 'handle', 'complib']:
getattr(store, "_%s" % x)
def test_put(self):
with ensure_clean_store(self.path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store['a'] = ts
store['b'] = df[:10]
store['foo/bar/bah'] = df[:10]
store['foo'] = df[:10]
store['/foo'] = df[:10]
store.put('c', df[:10], format='table')
# not OK, not a table
self.assertRaises(
ValueError, store.put, 'b', df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
# _maybe_remove(store, 'f')
# self.assertRaises(ValueError, store.put, 'f', df[10:],
# append=True)
# can't put to a table (use append instead)
self.assertRaises(ValueError, store.put, 'c', df[10:], append=True)
# overwrite table
store.put('c', df[:10], format='table', append=False)
tm.assert_frame_equal(df[:10], store['c'])
def test_put_string_index(self):
with ensure_clean_store(self.path) as store:
index = Index(
["I am a very long string index: %s" % i for i in range(20)])
s = Series(np.arange(20), index=index)
df = DataFrame({'A': s, 'B': s})
store['a'] = s
tm.assert_series_equal(store['a'], s)
store['b'] = df
tm.assert_frame_equal(store['b'], df)
# mixed length
index = Index(['abcdefghijklmnopqrstuvwxyz1234567890'] +
["I am a very long string index: %s" % i
for i in range(20)])
s = Series(np.arange(21), index=index)
df = DataFrame({'A': s, 'B': s})
store['a'] = s
tm.assert_series_equal(store['a'], s)
store['b'] = df
tm.assert_frame_equal(store['b'], df)
def test_put_compression(self):
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
store.put('c', df, format='table', complib='zlib')
tm.assert_frame_equal(store['c'], df)
# can't compress if format='fixed'
self.assertRaises(ValueError, store.put, 'b', df,
format='fixed', complib='zlib')
def test_put_compression_blosc(self):
tm.skip_if_no_package('tables', '2.2', app='blosc support')
if skip_compression:
raise nose.SkipTest("skipping on windows/PY3")
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
# can't compress if format='fixed'
self.assertRaises(ValueError, store.put, 'b', df,
format='fixed', complib='blosc')
store.put('c', df, format='table', complib='blosc')
tm.assert_frame_equal(store['c'], df)
def test_put_integer(self):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal)
def test_put_mixed_type(self):
df = tm.makeTimeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
df['timestamp1'] = Timestamp('20010102')
df['timestamp2'] = Timestamp('20010103')
df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
df.ix[3:6, ['obj1']] = np.nan
df = df.consolidate()._convert(datetime=True)
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
# cannot use assert_produces_warning here for some reason
# a PendingDeprecationWarning is also raised?
warnings.filterwarnings('ignore', category=PerformanceWarning)
store.put('df', df)
warnings.filterwarnings('always', category=PerformanceWarning)
expected = store.get('df')
tm.assert_frame_equal(expected, df)
def test_append(self):
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
store.append('df1', df[:10])
store.append('df1', df[10:])
tm.assert_frame_equal(store['df1'], df)
_maybe_remove(store, 'df2')
store.put('df2', df[:10], format='table')
store.append('df2', df[10:])
tm.assert_frame_equal(store['df2'], df)
_maybe_remove(store, 'df3')
store.append('/df3', df[:10])
store.append('/df3', df[10:])
tm.assert_frame_equal(store['df3'], df)
# this is allowed, but you almost always don't want to do it
with tm.assert_produces_warning(
expected_warning=tables.NaturalNameWarning):
_maybe_remove(store, '/df3 foo')
store.append('/df3 foo', df[:10])
store.append('/df3 foo', df[10:])
tm.assert_frame_equal(store['df3 foo'], df)
# panel
wp = tm.makePanel()
_maybe_remove(store, 'wp1')
store.append('wp1', wp.ix[:, :10, :])
store.append('wp1', wp.ix[:, 10:, :])
assert_panel_equal(store['wp1'], wp)
# ndim
p4d = tm.makePanel4D()
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.ix[:, :, :10, :])
store.append('p4d', p4d.ix[:, :, 10:, :])
assert_panel4d_equal(store['p4d'], p4d)
# test using axis labels
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.ix[:, :, :10, :], axes=[
'items', 'major_axis', 'minor_axis'])
store.append('p4d', p4d.ix[:, :, 10:, :], axes=[
'items', 'major_axis', 'minor_axis'])
assert_panel4d_equal(store['p4d'], p4d)
# test using a different number of items on each axis
p4d2 = p4d.copy()
p4d2['l4'] = p4d['l1']
p4d2['l5'] = p4d['l1']
_maybe_remove(store, 'p4d2')
store.append(
'p4d2', p4d2, axes=['items', 'major_axis', 'minor_axis'])
assert_panel4d_equal(store['p4d2'], p4d2)
# test using a different order of items on the non-index axes
_maybe_remove(store, 'wp1')
wp_append1 = wp.ix[:, :10, :]
store.append('wp1', wp_append1)
wp_append2 = wp.ix[:, 10:, :].reindex(items=wp.items[::-1])
store.append('wp1', wp_append2)
assert_panel_equal(store['wp1'], wp)
# dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df['mixed_column'] = 'testing'
df.ix[2, 'mixed_column'] = np.nan
_maybe_remove(store, 'df')
store.append('df', df)
tm.assert_frame_equal(store['df'], df)
# uints - test storage of uints
uint_data = DataFrame({
'u08': Series(np.random.random_integers(0, high=255, size=5),
dtype=np.uint8),
'u16': Series(np.random.random_integers(0, high=65535, size=5),
dtype=np.uint16),
'u32': Series(np.random.random_integers(0, high=2**30, size=5),
dtype=np.uint32),
'u64': Series([2**58, 2**59, 2**60, 2**61, 2**62],
dtype=np.uint64)}, index=np.arange(5))
_maybe_remove(store, 'uints')
store.append('uints', uint_data)
tm.assert_frame_equal(store['uints'], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, 'uints')
# 64-bit indices not yet supported
store.append('uints', uint_data, data_columns=[
'u08', 'u16', 'u32'])
tm.assert_frame_equal(store['uints'], uint_data)
def test_append_series(self):
with ensure_clean_store(self.path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append('ss', ss)
result = store['ss']
tm.assert_series_equal(result, ss)
self.assertIsNone(result.name)
store.append('ts', ts)
result = store['ts']
tm.assert_series_equal(result, ts)
self.assertIsNone(result.name)
ns.name = 'foo'
store.append('ns', ns)
result = store['ns']
tm.assert_series_equal(result, ns)
self.assertEqual(result.name, ns.name)
# select on the values
expected = ns[ns > 60]
result = store.select('ns', Term('foo>60'))
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select('ns', [Term('foo>70'), Term('index<90')])
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=['A'])
mi['B'] = np.arange(len(mi))
mi['C'] = 'foo'
mi.loc[3:5, 'C'] = 'bar'
mi.set_index(['C', 'B'], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append('mi', s)
tm.assert_series_equal(store['mi'], s)
def test_store_index_types(self):
# GH5386
# test storing various index types
with ensure_clean_store(self.path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df.index = index(len(df))
_maybe_remove(store, 'df')
store.put('df', df, format=format)
assert_frame_equal(df, store['df'])
for index in [tm.makeFloatIndex, tm.makeStringIndex,
tm.makeIntIndex, tm.makeDateIndex]:
check('table', index)
check('fixed', index)
# period index currently broken for table
# see GH7796 FIXME
check('fixed', tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
if compat.PY3:
check('table', index)
check('fixed', index)
else:
# only support for fixed types (and they have a perf warning)
self.assertRaises(TypeError, check, 'table', index)
with tm.assert_produces_warning(
expected_warning=PerformanceWarning):
check('fixed', index)
def test_encoding(self):
if sys.byteorder != 'little':
raise nose.SkipTest('system byteorder is not little')
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(A='foo', B='bar'), index=range(5))
df.loc[2, 'A'] = np.nan
df.loc[3, 'B'] = np.nan
_maybe_remove(store, 'df')
store.append('df', df, encoding='ascii')
tm.assert_frame_equal(store['df'], df)
expected = df.reindex(columns=['A'])
result = store.select('df', Term('columns=A', encoding='ascii'))
tm.assert_frame_equal(result, expected)
def test_latin_encoding(self):
if compat.PY2:
self.assertRaisesRegexp(
TypeError, '\[unicode\] is not implemented as a table column')
return
values = [[b'E\xc9, 17', b'', b'a', b'b', b'c'],
[b'E\xc9, 17', b'a', b'b', b'c'],
[b'EE, 17', b'', b'a', b'b', b'c'],
[b'E\xc9, 17', b'\xf8\xfc', b'a', b'b', b'c'],
[b'', b'a', b'b', b'c'],
[b'\xf8\xfc', b'a', b'b', b'c'],
[b'A\xf8\xfc', b'', b'a', b'b', b'c'],
[np.nan, b'', b'b', b'c'],
[b'A\xf8\xfc', np.nan, b'', b'b', b'c']]
def _try_decode(x, encoding='latin-1'):
try:
return x.decode(encoding)
except AttributeError:
return x
# not sure how to remove latin-1 from code in python 2 and 3
values = [[_try_decode(x) for x in y] for y in values]
examples = []
for dtype in ['category', object]:
for val in values:
examples.append(pandas.Series(val, dtype=dtype))
def roundtrip(s, key='data', encoding='latin-1', nan_rep=''):
with ensure_clean_path(self.path) as store:
s.to_hdf(store, key, format='table', encoding=encoding,
nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = s.replace(nan_rep, np.nan)
assert_series_equal(s_nan, retr)
for s in examples:
roundtrip(s)
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self):
with ensure_clean_store(self.path) as store:
df = DataFrame({'A': Series(np.random.randn(20)).astype('int32'),
'A1': np.random.randn(20),
'A2': np.random.randn(20),
'B': 'foo', 'C': 'bar',
'D': Timestamp("20010101"),
'E': datetime.datetime(2001, 1, 2, 0, 0)},
index=np.arange(20))
# some nans
_maybe_remove(store, 'df1')
df.ix[0:15, ['A1', 'B', 'D', 'E']] = np.nan
store.append('df1', df[:10])
store.append('df1', df[10:])
tm.assert_frame_equal(store['df1'], df)
# first column
df1 = df.copy()
df1.ix[:, 'A1'] = np.nan
_maybe_remove(store, 'df1')
store.append('df1', df1[:10])
store.append('df1', df1[10:])
tm.assert_frame_equal(store['df1'], df1)
# 2nd column
df2 = df.copy()
df2.ix[:, 'A2'] = np.nan
_maybe_remove(store, 'df2')
store.append('df2', df2[:10])
store.append('df2', df2[10:])
tm.assert_frame_equal(store['df2'], df2)
# datetimes
df3 = df.copy()
df3.ix[:, 'E'] = np.nan
_maybe_remove(store, 'df3')
store.append('df3', df3[:10])
store.append('df3', df3[10:])
tm.assert_frame_equal(store['df3'], df3)
def test_append_all_nans(self):
with ensure_clean_store(self.path) as store:
df = DataFrame({'A1': np.random.randn(20),
'A2': np.random.randn(20)},
index=np.arange(20))
df.ix[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, 'df')
store.append('df', df[:10], dropna=True)
store.append('df', df[10:], dropna=True)
tm.assert_frame_equal(store['df'], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, 'df2')
store.append('df2', df[:10], dropna=False)
store.append('df2', df[10:], dropna=False)
tm.assert_frame_equal(store['df2'], df)
# tests the option io.hdf.dropna_table
pandas.set_option('io.hdf.dropna_table', False)
_maybe_remove(store, 'df3')
store.append('df3', df[:10])
store.append('df3', df[10:])
tm.assert_frame_equal(store['df3'], df)
pandas.set_option('io.hdf.dropna_table', True)
_maybe_remove(store, 'df4')
store.append('df4', df[:10])
store.append('df4', df[10:])
tm.assert_frame_equal(store['df4'], df[-4:])
# nan some entire rows (strings are still written!)
df = DataFrame({'A1': np.random.randn(20),
'A2': np.random.randn(20),
'B': 'foo', 'C': 'bar'},
index=np.arange(20))
df.ix[0:15, :] = np.nan
_maybe_remove(store, 'df')
store.append('df', df[:10], dropna=True)
store.append('df', df[10:], dropna=True)
tm.assert_frame_equal(store['df'], df)
_maybe_remove(store, 'df2')
store.append('df2', df[:10], dropna=False)
store.append('df2', df[10:], dropna=False)
tm.assert_frame_equal(store['df2'], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame({'A1': np.random.randn(20),
'A2': np.random.randn(20),
'B': 'foo', 'C': 'bar',
'D': Timestamp("20010101"),
'E': datetime.datetime(2001, 1, 2, 0, 0)},
index=np.arange(20))
df.ix[0:15, :] = np.nan
_maybe_remove(store, 'df')
store.append('df', df[:10], dropna=True)
store.append('df', df[10:], dropna=True)
tm.assert_frame_equal(store['df'], df)
_maybe_remove(store, 'df2')
store.append('df2', df[:10], dropna=False)
store.append('df2', df[10:], dropna=False)
tm.assert_frame_equal(store['df2'], df)
# Test to make sure the default is not to drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{'col1': [0, np.nan, 2], 'col2': [1, np.nan, np.nan]})
with ensure_clean_path(self.path) as path:
df_with_missing.to_hdf(path, 'df_with_missing', format='table')
reloaded = read_hdf(path, 'df_with_missing')
tm.assert_frame_equal(df_with_missing, reloaded)
matrix = [[[np.nan, np.nan, np.nan], [1, np.nan, np.nan]],
[[np.nan, np.nan, np.nan], [np.nan, 5, 6]],
[[np.nan, np.nan, np.nan], [np.nan, 3, np.nan]]]
panel_with_missing = Panel(matrix, items=['Item1', 'Item2', 'Item3'],
major_axis=[1, 2],
minor_axis=['A', 'B', 'C'])
with ensure_clean_path(self.path) as path:
panel_with_missing.to_hdf(
path, 'panel_with_missing', format='table')
reloaded_panel = read_hdf(path, 'panel_with_missing')
tm.assert_panel_equal(panel_with_missing, reloaded_panel)
def test_append_frame_column_oriented(self):
with ensure_clean_store(self.path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
store.append('df1', df.ix[:, :2], axes=['columns'])
store.append('df1', df.ix[:, 2:])
tm.assert_frame_equal(store['df1'], df)
result = store.select('df1', 'columns=A')
expected = df.reindex(columns=['A'])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select(
'df1', ('columns=A', Term('index=df.index[0:4]')))
expected = df.reindex(columns=['A'], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
self.assertRaises(TypeError, store.select, 'df1', (
'columns=A', Term('index>df.index[4]')))
def test_append_with_different_block_ordering(self):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(self.path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df['index'] = range(10)
df['index'] += i * 10
df['int64'] = Series([1] * len(df), dtype='int64')
df['int16'] = Series([1] * len(df), dtype='int16')
if i % 2 == 0:
del df['int64']
df['int64'] = Series([1] * len(df), dtype='int64')
if i % 3 == 0:
a = df.pop('A')
df['A'] = a
df.set_index('index', inplace=True)
store.append('df', df)
# test a different ordering but with more fields (like invalid
# combination)
with ensure_clean_store(self.path) as store:
df = DataFrame(np.random.randn(10, 2),
columns=list('AB'), dtype='float64')
df['int64'] = Series([1] * len(df), dtype='int64')
df['int16'] = Series([1] * len(df), dtype='int16')
store.append('df', df)
# store additional fields in different blocks
df['int16_2'] = Series([1] * len(df), dtype='int16')
self.assertRaises(ValueError, store.append, 'df', df)
# store multiple additional fields in different blocks
df['float_3'] = Series([1.] * len(df), dtype='float64')
self.assertRaises(ValueError, store.append, 'df', df)
def test_ndim_indexables(self):
""" test using ndim tables in new ways"""
with ensure_clean_store(self.path) as store:
p4d = tm.makePanel4D()
def check_indexers(key, indexers):
for i, idx in enumerate(indexers):
self.assertTrue(getattr(getattr(
store.root, key).table.description, idx)._v_pos == i)
# append then change (will take existing schema)
indexers = ['items', 'major_axis', 'minor_axis']
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)
store.append('p4d', p4d.ix[:, :, 10:, :])
assert_panel4d_equal(store.select('p4d'), p4d)
check_indexers('p4d', indexers)
# same as above, but try to append with different axes
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)
store.append('p4d', p4d.ix[:, :, 10:, :], axes=[
'labels', 'items', 'major_axis'])
assert_panel4d_equal(store.select('p4d'), p4d)
check_indexers('p4d', indexers)
# pass incorrect number of axes
_maybe_remove(store, 'p4d')
self.assertRaises(ValueError, store.append, 'p4d', p4d.ix[
:, :, :10, :], axes=['major_axis', 'minor_axis'])
# different than default indexables #1
indexers = ['labels', 'major_axis', 'minor_axis']
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)
store.append('p4d', p4d.ix[:, :, 10:, :])
assert_panel4d_equal(store['p4d'], p4d)
check_indexers('p4d', indexers)
# different than default indexables #2
indexers = ['major_axis', 'labels', 'minor_axis']
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)
store.append('p4d', p4d.ix[:, :, 10:, :])
assert_panel4d_equal(store['p4d'], p4d)
check_indexers('p4d', indexers)
# partial selection
result = store.select('p4d', ['labels=l1'])
expected = p4d.reindex(labels=['l1'])
assert_panel4d_equal(result, expected)
# partial selection2
result = store.select('p4d', [Term(
'labels=l1'), Term('items=ItemA'), Term('minor_axis=B')])
expected = p4d.reindex(
labels=['l1'], items=['ItemA'], minor_axis=['B'])
assert_panel4d_equal(result, expected)
# non-existent partial selection
result = store.select('p4d', [Term(
'labels=l1'), Term('items=Item1'), Term('minor_axis=B')])
expected = p4d.reindex(labels=['l1'], items=[], minor_axis=['B'])
assert_panel4d_equal(result, expected)
def test_append_with_strings(self):
with ensure_clean_store(self.path) as store:
wp = tm.makePanel()
wp2 = wp.rename_axis(
dict([(x, "%s_extra" % x) for x in wp.minor_axis]), axis=2)
def check_col(key, name, size):
self.assertEqual(getattr(store.get_storer(
key).table.description, name).itemsize, size)
store.append('s1', wp, min_itemsize=20)
store.append('s1', wp2)
expected = concat([wp, wp2], axis=2)
expected = expected.reindex(minor_axis=sorted(expected.minor_axis))
assert_panel_equal(store['s1'], expected)
check_col('s1', 'minor_axis', 20)
# test dict format
store.append('s2', wp, min_itemsize={'minor_axis': 20})
store.append('s2', wp2)
expected = concat([wp, wp2], axis=2)
expected = expected.reindex(minor_axis=sorted(expected.minor_axis))
assert_panel_equal(store['s2'], expected)
check_col('s2', 'minor_axis', 20)
# apply the wrong field (similar to #1)
store.append('s3', wp, min_itemsize={'major_axis': 20})
self.assertRaises(ValueError, store.append, 's3', wp2)
# test truncation of bigger strings
store.append('s4', wp)
self.assertRaises(ValueError, store.append, 's4', wp2)
# avoid truncation on elements
df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])
store.append('df_big', df)
tm.assert_frame_equal(store.select('df_big'), df)
check_col('df_big', 'values_block_1', 15)
# appending smaller string ok
df2 = DataFrame([[124, 'asdqy'], [346, 'dggnhefbdfb']])
store.append('df_big', df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select('df_big'), expected)
check_col('df_big', 'values_block_1', 15)
# avoid truncation on elements
df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])
store.append('df_big2', df, min_itemsize={'values': 50})
tm.assert_frame_equal(store.select('df_big2'), df)
check_col('df_big2', 'values_block_1', 50)
# bigger string on next append
store.append('df_new', df)
df_new = DataFrame(
[[124, 'abcdefqhij'], [346, 'abcdefghijklmnopqrtsuvwxyz']])
self.assertRaises(ValueError, store.append, 'df_new', df_new)
# with nans
_maybe_remove(store, 'df')
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df.ix[1:4, 'string'] = np.nan
df['string2'] = 'bar'
df.ix[4:8, 'string2'] = np.nan
df['string3'] = 'bah'
df.ix[1:, 'string3'] = np.nan
store.append('df', df)
result = store.select('df')
tm.assert_frame_equal(result, df)
with ensure_clean_store(self.path) as store:
def check_col(key, name, size):
self.assertEqual(getattr(store.get_storer(
key).table.description, name).itemsize, size)
df = DataFrame(dict(A='foo', B='bar'), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, 'df')
store.append('df', df, min_itemsize={'A': 200})
check_col('df', 'A', 200)
self.assertEqual(store.get_storer('df').data_columns, ['A'])
# a min_itemsize that creates a data_column2
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['B'], min_itemsize={'A': 200})
check_col('df', 'A', 200)
self.assertEqual(store.get_storer('df').data_columns, ['B', 'A'])
# a min_itemsize that creates a data_column2
_maybe_remove(store, 'df')
store.append('df', df, data_columns=[
'B'], min_itemsize={'values': 200})
check_col('df', 'B', 200)
check_col('df', 'values_block_0', 200)
self.assertEqual(store.get_storer('df').data_columns, ['B'])
# infer the .typ on subsequent appends
_maybe_remove(store, 'df')
store.append('df', df[:5], min_itemsize=200)
store.append('df', df[5:], min_itemsize=200)
tm.assert_frame_equal(store['df'], df)
# invalid min_itemsize keys
df = DataFrame(['foo', 'foo', 'foo', 'barh',
'barh', 'barh'], columns=['A'])
_maybe_remove(store, 'df')
self.assertRaises(ValueError, store.append, 'df',
df, min_itemsize={'foo': 20, 'foobar': 20})
def test_append_with_data_columns(self):
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
df.loc[:, 'B'].iloc[0] = 1.
_maybe_remove(store, 'df')
store.append('df', df[:2], data_columns=['B'])
store.append('df', df[2:])
tm.assert_frame_equal(store['df'], df)
# check that we have indices created
assert(store._handle.root.df.table.cols.index.is_indexed is True)
assert(store._handle.root.df.table.cols.B.is_indexed is True)
# data column searching
result = store.select('df', [Term('B>0')])
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select(
'df', [Term('B>0'), Term('index>df.index[3]')])
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new['string'] = 'foo'
df_new.loc[1:4, 'string'] = np.nan
df_new.loc[5:6, 'string'] = 'bar'
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string'])
result = store.select('df', [Term('string=foo')])
expected = df_new[df_new.string == 'foo']
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
self.assertEqual(getattr(store.get_storer(
key).table.description, name).itemsize, size)
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string'],
min_itemsize={'string': 30})
check_col('df', 'string', 30)
_maybe_remove(store, 'df')
store.append(
'df', df_new, data_columns=['string'], min_itemsize=30)
check_col('df', 'string', 30)
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string'],
min_itemsize={'values': 30})
check_col('df', 'string', 30)
with ensure_clean_store(self.path) as store:
df_new['string2'] = 'foobarbah'
df_new['string_block1'] = 'foobarbah1'
df_new['string_block2'] = 'foobarbah2'
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string', 'string2'],
min_itemsize={'string': 30, 'string2': 40,
'values': 50})
check_col('df', 'string', 30)
check_col('df', 'string2', 40)
check_col('df', 'values_block_1', 50)
with ensure_clean_store(self.path) as store:
# multiple data columns
df_new = df.copy()
df_new.ix[0, 'A'] = 1.
df_new.ix[0, 'B'] = -1.
df_new['string'] = 'foo'
df_new.loc[1:4, 'string'] = np.nan
df_new.loc[5:6, 'string'] = 'bar'
df_new['string2'] = 'foo'
df_new.loc[2:5, 'string2'] = np.nan
df_new.loc[7:8, 'string2'] = 'bar'
_maybe_remove(store, 'df')
store.append(
'df', df_new, data_columns=['A', 'B', 'string', 'string2'])
result = store.select('df', [Term('string=foo'), Term(
'string2=foo'), Term('A>0'), Term('B<0')])
expected = df_new[(df_new.string == 'foo') & (
df_new.string2 == 'foo') & (df_new.A > 0) & (df_new.B < 0)]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select('df', [Term('string=foo'), Term(
'string2=cool')])
expected = df_new[(df_new.string == 'foo') & (
df_new.string2 == 'cool')]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(self.path) as store:
# doc example
df_dc = df.copy()
df_dc['string'] = 'foo'
df_dc.ix[4:6, 'string'] = np.nan
df_dc.ix[7:9, 'string'] = 'bar'
df_dc['string2'] = 'cool'
df_dc['datetime'] = Timestamp('20010102')
df_dc = df_dc._convert(datetime=True)
df_dc.ix[3:5, ['A', 'B', 'datetime']] = np.nan
_maybe_remove(store, 'df_dc')
store.append('df_dc', df_dc,
data_columns=['B', 'C', 'string',
'string2', 'datetime'])
result = store.select('df_dc', [Term('B>0')])
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select(
'df_dc', ['B > 0', 'C > 0', 'string == foo'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (
df_dc.string == 'foo')]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(self.path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range('1/1/2000', periods=8)
df_dc = DataFrame(np.random.randn(8, 3), index=index,
columns=['A', 'B', 'C'])
df_dc['string'] = 'foo'
df_dc.ix[4:6, 'string'] = np.nan
df_dc.ix[7:9, 'string'] = 'bar'
df_dc.ix[:, ['B', 'C']] = df_dc.ix[:, ['B', 'C']].abs()
df_dc['string2'] = 'cool'
# on-disk operations
store.append('df_dc', df_dc, data_columns=[
'B', 'C', 'string', 'string2'])
result = store.select('df_dc', [Term('B>0')])
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select(
'df_dc', ['B > 0', 'C > 0', 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) &
(df_dc.string == 'foo')]
tm.assert_frame_equal(result, expected)
with ensure_clean_store(self.path) as store:
# panel
# GH5717 not handling data_columns
np.random.seed(1234)
p = tm.makePanel()
store.append('p1', p)
tm.assert_panel_equal(store.select('p1'), p)
store.append('p2', p, data_columns=True)
tm.assert_panel_equal(store.select('p2'), p)
result = store.select('p2', where='ItemA>0')
expected = p.to_frame()
expected = expected[expected['ItemA'] > 0]
tm.assert_frame_equal(result.to_frame(), expected)
result = store.select('p2', where='ItemA>0 & minor_axis=["A","B"]')
expected = p.to_frame()
expected = expected[expected['ItemA'] > 0]
expected = expected[expected.reset_index(
level=['major']).index.isin(['A', 'B'])]
tm.assert_frame_equal(result.to_frame(), expected)
def test_create_table_index(self):
with ensure_clean_store(self.path) as store:
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# index=False
wp = tm.makePanel()
store.append('p5', wp, index=False)
store.create_table_index('p5', columns=['major_axis'])
assert(col('p5', 'major_axis').is_indexed is True)
assert(col('p5', 'minor_axis').is_indexed is False)
# index=True
store.append('p5i', wp, index=True)
assert(col('p5i', 'major_axis').is_indexed is True)
assert(col('p5i', 'minor_axis').is_indexed is True)
# default optlevels
store.get_storer('p5').create_index()
assert(col('p5', 'major_axis').index.optlevel == 6)
assert(col('p5', 'minor_axis').index.kind == 'medium')
# let's change the indexing scheme
store.create_table_index('p5')
assert(col('p5', 'major_axis').index.optlevel == 6)
assert(col('p5', 'minor_axis').index.kind == 'medium')
store.create_table_index('p5', optlevel=9)
assert(col('p5', 'major_axis').index.optlevel == 9)
assert(col('p5', 'minor_axis').index.kind == 'medium')
store.create_table_index('p5', kind='full')
assert(col('p5', 'major_axis').index.optlevel == 9)
assert(col('p5', 'minor_axis').index.kind == 'full')
store.create_table_index('p5', optlevel=1, kind='light')
assert(col('p5', 'major_axis').index.optlevel == 1)
assert(col('p5', 'minor_axis').index.kind == 'light')
# data columns
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df['string2'] = 'bar'
store.append('f', df, data_columns=['string', 'string2'])
assert(col('f', 'index').is_indexed is True)
assert(col('f', 'string').is_indexed is True)
assert(col('f', 'string2').is_indexed is True)
# specify index=columns
store.append(
'f2', df, index=['string'], data_columns=['string', 'string2'])
assert(col('f2', 'index').is_indexed is False)
assert(col('f2', 'string').is_indexed is True)
assert(col('f2', 'string2').is_indexed is False)
# try to index a non-table
_maybe_remove(store, 'f2')
store.put('f2', df)
self.assertRaises(TypeError, store.create_table_index, 'f2')
def test_append_diff_item_order(self):
wp = tm.makePanel()
wp1 = wp.ix[:, :10, :]
wp2 = wp.ix[['ItemC', 'ItemB', 'ItemA'], 10:, :]
with ensure_clean_store(self.path) as store:
store.put('panel', wp1, format='table')
self.assertRaises(ValueError, store.put, 'panel', wp2,
append=True)
def test_append_hierarchical(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo', 'bar'])
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
with ensure_clean_store(self.path) as store:
store.append('mi', df)
result = store.select('mi')
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select('mi', columns=['A', 'B'])
expected = df.reindex(columns=['A', 'B'])
tm.assert_frame_equal(result, expected)
with ensure_clean_path('test.hdf') as path:
df.to_hdf(path, 'df', format='table')
result = read_hdf(path, 'df', columns=['A', 'B'])
expected = df.reindex(columns=['A', 'B'])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples([('A', 'a'), ('A', 'b'),
('B', 'a'), ('B', 'b')],
names=['first', 'second'])
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(self.path) as store:
store.put('df', df)
tm.assert_frame_equal(store['df'], expected,
check_index_type=True,
check_column_type=True)
store.put('df1', df, format='table')
tm.assert_frame_equal(store['df1'], expected,
check_index_type=True,
check_column_type=True)
self.assertRaises(ValueError, store.put, 'df2', df,
format='table', data_columns=['A'])
self.assertRaises(ValueError, store.put, 'df3', df,
format='table', data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(self.path) as store:
store.append('df2', df)
store.append('df2', df)
tm.assert_frame_equal(store['df2'], concat((df, df)))
# non_index_axes name
df = DataFrame(np.arange(12).reshape(3, 4),
columns=Index(list('ABCD'), name='foo'))
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(self.path) as store:
store.put('df1', df, format='table')
tm.assert_frame_equal(store['df1'], expected,
check_index_type=True,
check_column_type=True)
def test_store_multiindex(self):
# validate multi-index names
# GH 5527
with ensure_clean_store(self.path) as store:
def make_index(names=None):
return MultiIndex.from_tuples([(datetime.datetime(2013, 12, d),
s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)],
names=names)
# no names
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12, 2)), columns=[
'a', 'b'], index=make_index())
store.append('df', df)
tm.assert_frame_equal(store.select('df'), df)
# partial names
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12, 2)), columns=[
'a', 'b'], index=make_index(['date', None, None]))
store.append('df', df)
tm.assert_frame_equal(store.select('df'), df)
# series
_maybe_remove(store, 's')
s = Series(np.zeros(12), index=make_index(['date', None, None]))
store.append('s', s)
xp = Series(np.zeros(12), index=make_index(
['date', 'level_1', 'level_2']))
tm.assert_series_equal(store.select('s'), xp)
# dup with column
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12, 2)), columns=[
'a', 'b'], index=make_index(['date', 'a', 't']))
self.assertRaises(ValueError, store.append, 'df', df)
# dup within level
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12, 2)), columns=['a', 'b'],
index=make_index(['date', 'date', 'date']))
self.assertRaises(ValueError, store.append, 'df', df)
# fully names
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12, 2)), columns=[
'a', 'b'], index=make_index(['date', 's', 't']))
store.append('df', df)
tm.assert_frame_equal(store.select('df'), df)
def test_select_columns_in_where(self):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo_name', 'bar_name'])
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table')
expected = df[['A']]
tm.assert_frame_equal(store.select('df', columns=['A']), expected)
tm.assert_frame_equal(store.select(
'df', where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index,
name='A')
with ensure_clean_store(self.path) as store:
store.put('s', s, format='table')
tm.assert_series_equal(store.select('s', where="columns=['A']"), s)
def test_pass_spec_to_storer(self):
df = tm.makeDataFrame()
with ensure_clean_store(self.path) as store:
store.put('df', df)
self.assertRaises(TypeError, store.select, 'df', columns=['A'])
self.assertRaises(TypeError, store.select,
'df', where=[('columns=A')])
def test_append_misc(self):
with ensure_clean_store(self.path) as store:
# unsupported data types for non-tables
p4d = tm.makePanel4D()
self.assertRaises(TypeError, store.put, 'p4d', p4d)
# unsupported data types
self.assertRaises(TypeError, store.put, 'abc', None)
self.assertRaises(TypeError, store.put, 'abc', '123')
self.assertRaises(TypeError, store.put, 'abc', 123)
self.assertRaises(TypeError, store.put, 'abc', np.arange(5))
df = tm.makeDataFrame()
store.append('df', df, chunksize=1)
result = store.select('df')
tm.assert_frame_equal(result, df)
store.append('df1', df, expectedrows=10)
result = store.select('df1')
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(self.path, mode='w') as store:
store.append('obj', obj, chunksize=c)
result = store.select('obj')
comparator(result, obj)
df = tm.makeDataFrame()
df['string'] = 'foo'
df['float322'] = 1.
df['float322'] = df['float322'].astype('float32')
df['bool'] = df['float322'] > 0
df['time1'] = Timestamp('20130101')
df['time2'] = Timestamp('20130102')
check(df, tm.assert_frame_equal)
p = tm.makePanel()
check(p, assert_panel_equal)
p4d = tm.makePanel4D()
check(p4d, assert_panel4d_equal)
# empty frame, GH4273
with ensure_clean_store(self.path) as store:
# 0 len
df_empty = DataFrame(columns=list('ABC'))
store.append('df', df_empty)
self.assertRaises(KeyError, store.select, 'df')
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list('ABC'))
store.append('df', df)
assert_frame_equal(store.select('df'), df)
store.append('df', df_empty)
assert_frame_equal(store.select('df'), df)
# store
df = DataFrame(columns=list('ABC'))
store.put('df2', df)
assert_frame_equal(store.select('df2'), df)
# 0 len
p_empty = Panel(items=list('ABC'))
store.append('p', p_empty)
self.assertRaises(KeyError, store.select, 'p')
# repeated append of 0/non-zero frames
p = Panel(np.random.randn(3, 4, 5), items=list('ABC'))
store.append('p', p)
assert_panel_equal(store.select('p'), p)
store.append('p', p_empty)
assert_panel_equal(store.select('p'), p)
# store
store.put('p2', p_empty)
assert_panel_equal(store.select('p2'), p_empty)
def test_append_raise(self):
with ensure_clean_store(self.path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df['invalid'] = [['a']] * len(df)
self.assertEqual(df.dtypes['invalid'], np.object_)
self.assertRaises(TypeError, store.append, 'df', df)
# multiple invalid columns
df['invalid2'] = [['a']] * len(df)
df['invalid3'] = [['a']] * len(df)
self.assertRaises(TypeError, store.append, 'df', df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df['invalid'] = s
self.assertEqual(df.dtypes['invalid'], np.object_)
self.assertRaises(TypeError, store.append, 'df', df)
            # ndarray passed directly
self.assertRaises(TypeError, store.append, 'df', np.arange(10))
# series directly
self.assertRaises(TypeError, store.append,
'df', Series(np.arange(10)))
            # appending an incompatible table
df = tm.makeDataFrame()
store.append('df', df)
df['foo'] = 'foo'
self.assertRaises(ValueError, store.append, 'df', df)
def test_table_index_incompatible_dtypes(self):
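        # appending a frame whose index dtype conflicts with the stored
        # table's index should raise TypeError.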
df1 = DataFrame({'a': [1, 2, 3]})
df2 = DataFrame({'a': [4, 5, 6]},
index=date_range('1/1/2000', periods=3))
with ensure_clean_store(self.path) as store:
store.put('frame', df1, format='table')
self.assertRaises(TypeError, store.put, 'frame', df2,
format='table', append=True)
def test_table_values_dtypes_roundtrip(self):
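        # column dtypes must survive a table round-trip; appending data with
        # an incompatible dtype raises ValueError.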
with ensure_clean_store(self.path) as store:
df1 = DataFrame({'a': [1, 2, 3]}, dtype='f8')
store.append('df_f8', df1)
assert_series_equal(df1.dtypes, store['df_f8'].dtypes)
df2 = DataFrame({'a': [1, 2, 3]}, dtype='i8')
store.append('df_i8', df2)
assert_series_equal(df2.dtypes, store['df_i8'].dtypes)
# incompatible dtype
self.assertRaises(ValueError, store.append, 'df_i8', df1)
# check creation/storage/retrieval of float32 (a bit hacky to
            # actually create them though)
df1 = DataFrame(
np.array([[1], [2], [3]], dtype='f4'), columns=['A'])
store.append('df_f4', df1)
assert_series_equal(df1.dtypes, store['df_f4'].dtypes)
assert df1.dtypes[0] == 'float32'
# check with mixed dtypes
df1 = DataFrame(dict([(c, Series(np.random.randn(5), dtype=c))
for c in ['float32', 'float64', 'int32',
'int64', 'int16', 'int8']]))
df1['string'] = 'foo'
df1['float322'] = 1.
df1['float322'] = df1['float322'].astype('float32')
df1['bool'] = df1['float32'] > 0
df1['time1'] = Timestamp('20130101')
df1['time2'] = Timestamp('20130102')
store.append('df_mixed_dtypes1', df1)
result = store.select('df_mixed_dtypes1').get_dtype_counts()
expected = Series({'float32': 2, 'float64': 1, 'int32': 1,
'bool': 1, 'int16': 1, 'int8': 1,
'int64': 1, 'object': 1, 'datetime64[ns]': 2})
result.sort()
expected.sort()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self):
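        # mixed object/bool/int/datetime columns round-trip through the
        # table format for frames, panels and 4d panels.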
# frame
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
df['timestamp1'] = Timestamp('20010102')
df['timestamp2'] = Timestamp('20010103')
df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
df.ix[3:6, ['obj1']] = np.nan
df = df.consolidate()._convert(datetime=True)
with ensure_clean_store(self.path) as store:
store.append('df1_mixed', df)
tm.assert_frame_equal(store.select('df1_mixed'), df)
# panel
wp = tm.makePanel()
wp['obj1'] = 'foo'
wp['obj2'] = 'bar'
wp['bool1'] = wp['ItemA'] > 0
wp['bool2'] = wp['ItemB'] > 0
wp['int1'] = 1
wp['int2'] = 2
wp = wp.consolidate()
with ensure_clean_store(self.path) as store:
store.append('p1_mixed', wp)
assert_panel_equal(store.select('p1_mixed'), wp)
# ndim
wp = tm.makePanel4D()
wp['obj1'] = 'foo'
wp['obj2'] = 'bar'
wp['bool1'] = wp['l1'] > 0
wp['bool2'] = wp['l2'] > 0
wp['int1'] = 1
wp['int2'] = 2
wp = wp.consolidate()
with ensure_clean_store(self.path) as store:
store.append('p4d_mixed', wp)
assert_panel4d_equal(store.select('p4d_mixed'), wp)
def test_unimplemented_dtypes_table_columns(self):
with ensure_clean_store(self.path) as store:
l = [('date', datetime.date(2001, 1, 2))]
# py3 ok for unicode
if not compat.PY3:
l.append(('unicode', u('\\u03c3')))
            # currently unsupported dtypes
for n, f in l:
df = tm.makeDataFrame()
df[n] = f
self.assertRaises(
TypeError, store.append, 'df1_%s' % n, df)
# frame
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['datetime1'] = datetime.date(2001, 1, 2)
df = df.consolidate()._convert(datetime=True)
with ensure_clean_store(self.path) as store:
            # this fails because there is a date in the object block
self.assertRaises(TypeError, store.append, 'df_unimplemented', df)
def test_calendar_roundtrip_issue(self):
# 8591
# doc example from tseries holiday section
weekmask_egypt = 'Sun Mon Tue Wed Thu'
holidays = ['2012-05-01',
datetime.datetime(2013, 5, 1), np.datetime64('2014-05-01')]
bday_egypt = pandas.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = (Series(dts.weekday, dts).map(
Series('Mon Tue Wed Thu Fri Sat Sun'.split())))
with ensure_clean_store(self.path) as store:
store.put('fixed', s)
result = store.select('fixed')
assert_series_equal(result, s)
store.append('table', s)
result = store.select('table')
assert_series_equal(result, s)
def test_append_with_timedelta(self):
# GH 3577
# append timedelta
from datetime import timedelta
df = DataFrame(dict(A=Timestamp('20130101'), B=[Timestamp(
'20130101') + timedelta(days=i, seconds=10) for i in range(10)]))
df['C'] = df['A'] - df['B']
df.ix[3:5, 'C'] = np.nan
with ensure_clean_store(self.path) as store:
# table
_maybe_remove(store, 'df')
store.append('df', df, data_columns=True)
result = store.select('df')
assert_frame_equal(result, df)
result = store.select('df', Term("C<100000"))
assert_frame_equal(result, df)
result = store.select('df', Term("C", "<", -3 * 86400))
assert_frame_equal(result, df.iloc[3:])
result = store.select('df', "C<'-3D'")
assert_frame_equal(result, df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select('df', "C<'-500000s'")
result = result.dropna(subset=['C'])
assert_frame_equal(result, df.iloc[6:])
result = store.select('df', "C<'-3.5D'")
result = result.iloc[1:]
assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, 'df2')
store.put('df2', df)
result = store.select('df2')
assert_frame_equal(result, df)
def test_remove(self):
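        # remove keys directly, via '/' pathing and via __delitem__;
        # removing a missing key raises KeyError.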
with ensure_clean_store(self.path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store['a'] = ts
store['b'] = df
_maybe_remove(store, 'a')
self.assertEqual(len(store), 1)
tm.assert_frame_equal(df, store['b'])
_maybe_remove(store, 'b')
self.assertEqual(len(store), 0)
# nonexistence
self.assertRaises(KeyError, store.remove, 'a_nonexistent_store')
# pathing
store['a'] = ts
store['b/foo'] = df
_maybe_remove(store, 'foo')
_maybe_remove(store, 'b/foo')
self.assertEqual(len(store), 1)
store['a'] = ts
store['b/foo'] = df
_maybe_remove(store, 'b')
self.assertEqual(len(store), 1)
# __delitem__
store['a'] = ts
store['b'] = df
del store['a']
del store['b']
self.assertEqual(len(store), 0)
def test_remove_where(self):
with ensure_clean_store(self.path) as store:
            # non-existence
crit1 = Term('index>foo')
self.assertRaises(KeyError, store.remove, 'a', [crit1])
# try to remove non-table (with crit)
# non-table ok (where = None)
wp = tm.makePanel(30)
store.put('wp', wp, format='table')
store.remove('wp', ["minor_axis=['A', 'D']"])
rs = store.select('wp')
expected = wp.reindex(minor_axis=['B', 'C'])
assert_panel_equal(rs, expected)
# empty where
_maybe_remove(store, 'wp')
store.put('wp', wp, format='table')
            # an empty where deletes the entire table and returns the row count
n = store.remove('wp', [])
self.assertTrue(n == 120)
# non - empty where
_maybe_remove(store, 'wp')
store.put('wp', wp, format='table')
self.assertRaises(ValueError, store.remove,
'wp', ['foo'])
            # selecting a non-table with a where
# store.put('wp2', wp, format='f')
# self.assertRaises(ValueError, store.remove,
# 'wp2', [('column', ['A', 'D'])])
def test_remove_startstop(self):
# GH #4835 and #6177
with ensure_clean_store(self.path) as store:
wp = tm.makePanel(30)
# start
_maybe_remove(store, 'wp1')
store.put('wp1', wp, format='t')
n = store.remove('wp1', start=32)
self.assertTrue(n == 120 - 32)
result = store.select('wp1')
expected = wp.reindex(major_axis=wp.major_axis[:32 // 4])
assert_panel_equal(result, expected)
_maybe_remove(store, 'wp2')
store.put('wp2', wp, format='t')
n = store.remove('wp2', start=-32)
self.assertTrue(n == 32)
result = store.select('wp2')
expected = wp.reindex(major_axis=wp.major_axis[:-32 // 4])
assert_panel_equal(result, expected)
# stop
_maybe_remove(store, 'wp3')
store.put('wp3', wp, format='t')
n = store.remove('wp3', stop=32)
self.assertTrue(n == 32)
result = store.select('wp3')
expected = wp.reindex(major_axis=wp.major_axis[32 // 4:])
assert_panel_equal(result, expected)
_maybe_remove(store, 'wp4')
store.put('wp4', wp, format='t')
n = store.remove('wp4', stop=-32)
self.assertTrue(n == 120 - 32)
result = store.select('wp4')
expected = wp.reindex(major_axis=wp.major_axis[-32 // 4:])
assert_panel_equal(result, expected)
            # start and stop
_maybe_remove(store, 'wp5')
store.put('wp5', wp, format='t')
n = store.remove('wp5', start=16, stop=-16)
self.assertTrue(n == 120 - 32)
result = store.select('wp5')
expected = wp.reindex(major_axis=wp.major_axis[
:16 // 4].union(wp.major_axis[-16 // 4:]))
assert_panel_equal(result, expected)
_maybe_remove(store, 'wp6')
store.put('wp6', wp, format='t')
n = store.remove('wp6', start=16, stop=16)
self.assertTrue(n == 0)
result = store.select('wp6')
expected = wp.reindex(major_axis=wp.major_axis)
assert_panel_equal(result, expected)
# with where
_maybe_remove(store, 'wp7')
            # referenced by name inside the Term expression below (only
            # looks unused to the linter)
date = wp.major_axis.take(np.arange(0, 30, 3)) # noqa
crit = Term('major_axis=date')
store.put('wp7', wp, format='t')
n = store.remove('wp7', where=[crit], stop=80)
self.assertTrue(n == 28)
result = store.select('wp7')
expected = wp.reindex(major_axis=wp.major_axis.difference(
wp.major_axis[np.arange(0, 20, 3)]))
assert_panel_equal(result, expected)
def test_remove_crit(self):
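        # row removal driven by Term criteria: grouped dates, the upper half
        # of the major axis, individual rows and out-of-range corners.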
with ensure_clean_store(self.path) as store:
wp = tm.makePanel(30)
# group row removal
_maybe_remove(store, 'wp3')
date4 = wp.major_axis.take([0, 1, 2, 4, 5, 6, 8, 9, 10])
crit4 = Term('major_axis=date4')
store.put('wp3', wp, format='t')
n = store.remove('wp3', where=[crit4])
self.assertTrue(n == 36)
result = store.select('wp3')
expected = wp.reindex(major_axis=wp.major_axis.difference(date4))
assert_panel_equal(result, expected)
# upper half
_maybe_remove(store, 'wp')
store.put('wp', wp, format='table')
date = wp.major_axis[len(wp.major_axis) // 2]
crit1 = Term('major_axis>date')
crit2 = Term("minor_axis=['A', 'D']")
n = store.remove('wp', where=[crit1])
self.assertTrue(n == 56)
n = store.remove('wp', where=[crit2])
self.assertTrue(n == 32)
result = store['wp']
expected = wp.truncate(after=date).reindex(minor=['B', 'C'])
assert_panel_equal(result, expected)
# individual row elements
_maybe_remove(store, 'wp2')
store.put('wp2', wp, format='table')
date1 = wp.major_axis[1:3]
crit1 = Term('major_axis=date1')
store.remove('wp2', where=[crit1])
result = store.select('wp2')
expected = wp.reindex(major_axis=wp.major_axis.difference(date1))
assert_panel_equal(result, expected)
date2 = wp.major_axis[5]
crit2 = Term('major_axis=date2')
store.remove('wp2', where=[crit2])
result = store['wp2']
expected = wp.reindex(major_axis=wp.major_axis.difference(date1)
.difference(Index([date2])))
assert_panel_equal(result, expected)
date3 = [wp.major_axis[7], wp.major_axis[9]]
crit3 = Term('major_axis=date3')
store.remove('wp2', where=[crit3])
result = store['wp2']
expected = wp.reindex(major_axis=wp.major_axis
.difference(date1)
.difference(Index([date2]))
.difference(Index(date3)))
assert_panel_equal(result, expected)
# corners
_maybe_remove(store, 'wp4')
store.put('wp4', wp, format='table')
n = store.remove(
'wp4', where=[Term('major_axis>wp.major_axis[-1]')])
result = store.select('wp4')
assert_panel_equal(result, wp)
def test_invalid_terms(self):
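        # malformed or unresolvable where terms should raise instead of
        # silently returning the wrong data.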
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df.ix[0:4, 'string'] = 'bar'
wp = tm.makePanel()
p4d = tm.makePanel4D()
store.put('df', df, format='table')
store.put('wp', wp, format='table')
store.put('p4d', p4d, format='table')
# some invalid terms
self.assertRaises(ValueError, store.select,
'wp', "minor=['A', 'B']")
self.assertRaises(ValueError, store.select,
'wp', ["index=['20121114']"])
self.assertRaises(ValueError, store.select, 'wp', [
"index=['20121114', '20121114']"])
self.assertRaises(TypeError, Term)
# more invalid
self.assertRaises(ValueError, store.select, 'df', 'df.index[3]')
self.assertRaises(SyntaxError, store.select, 'df', 'index>')
self.assertRaises(ValueError, store.select, 'wp',
"major_axis<'20000108' & minor_axis['A', 'B']")
# from the docs
with ensure_clean_path(self.path) as path:
dfq = DataFrame(np.random.randn(10, 4), columns=list(
'ABCD'), index=date_range('20130101', periods=10))
dfq.to_hdf(path, 'dfq', format='table', data_columns=True)
# check ok
read_hdf(path, 'dfq',
where="index>Timestamp('20130104') & columns=['A', 'B']")
read_hdf(path, 'dfq', where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(self.path) as path:
dfq = DataFrame(np.random.randn(10, 4), columns=list(
'ABCD'), index=date_range('20130101', periods=10))
dfq.to_hdf(path, 'dfq', format='table')
self.assertRaises(ValueError, read_hdf, path,
'dfq', where="A>0 or C>0")
def test_terms(self):
with ensure_clean_store(self.path) as store:
wp = tm.makePanel()
p4d = tm.makePanel4D()
wpneg = Panel.fromDict({-1: tm.makeDataFrame(),
0: tm.makeDataFrame(),
1: tm.makeDataFrame()})
store.put('wp', wp, format='table')
store.put('p4d', p4d, format='table')
store.put('wpneg', wpneg, format='table')
# panel
result = store.select('wp', [Term(
'major_axis<"20000108"'), Term("minor_axis=['A', 'B']")])
expected = wp.truncate(after='20000108').reindex(minor=['A', 'B'])
assert_panel_equal(result, expected)
# with deprecation
result = store.select('wp', [Term(
'major_axis', '<', "20000108"), Term("minor_axis=['A', 'B']")])
expected = wp.truncate(after='20000108').reindex(minor=['A', 'B'])
tm.assert_panel_equal(result, expected)
# p4d
result = store.select('p4d', [Term('major_axis<"20000108"'),
Term("minor_axis=['A', 'B']"),
Term("items=['ItemA', 'ItemB']")])
expected = p4d.truncate(after='20000108').reindex(
minor=['A', 'B'], items=['ItemA', 'ItemB'])
assert_panel4d_equal(result, expected)
# back compat invalid terms
terms = [dict(field='major_axis', op='>', value='20121114'),
[dict(field='major_axis', op='>', value='20121114')],
["minor_axis=['A','B']",
dict(field='major_axis', op='>', value='20121114')]]
for t in terms:
with tm.assert_produces_warning(expected_warning=FutureWarning,
check_stacklevel=False):
Term(t)
# valid terms
terms = [
('major_axis=20121114'),
('major_axis>20121114'),
(("major_axis=['20121114', '20121114']"),),
('major_axis=datetime.datetime(2012, 11, 14)'),
'major_axis> 20121114',
'major_axis >20121114',
'major_axis > 20121114',
(("minor_axis=['A', 'B']"),),
(("minor_axis=['A', 'B']"),),
((("minor_axis==['A', 'B']"),),),
(("items=['ItemA', 'ItemB']"),),
('items=ItemA'),
]
for t in terms:
store.select('wp', t)
store.select('p4d', t)
# valid for p4d only
terms = [
(("labels=['l1', 'l2']"),),
Term("labels=['l1', 'l2']"),
]
for t in terms:
store.select('p4d', t)
with tm.assertRaisesRegexp(TypeError,
'Only named functions are supported'):
store.select('wp', Term(
'major_axis == (lambda x: x)("20130101")'))
# check USub node parsing
res = store.select('wpneg', Term('items == -1'))
expected = Panel({-1: wpneg[-1]})
tm.assert_panel_equal(res, expected)
with tm.assertRaisesRegexp(NotImplementedError,
'Unary addition not supported'):
store.select('wpneg', Term('items == +1'))
def test_term_compat(self):
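        # the older Term(field, op, value) calling convention keeps working
        # alongside the string-expression form.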
with ensure_clean_store(self.path) as store:
wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B', 'C', 'D'])
store.append('wp', wp)
result = store.select('wp', [Term('major_axis>20000102'),
Term('minor_axis', '=', ['A', 'B'])])
expected = wp.loc[:, wp.major_axis >
Timestamp('20000102'), ['A', 'B']]
assert_panel_equal(result, expected)
store.remove('wp', Term('major_axis>20000103'))
result = store.select('wp')
expected = wp.loc[:, wp.major_axis <= Timestamp('20000103'), :]
assert_panel_equal(result, expected)
with ensure_clean_store(self.path) as store:
wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B', 'C', 'D'])
store.append('wp', wp)
            # datetime objects in the where terms
result = store.select(
'wp', [Term('major_axis', '>', datetime.datetime(2000, 1, 2))])
expected = wp.loc[:, wp.major_axis > Timestamp('20000102')]
assert_panel_equal(result, expected)
result = store.select(
'wp', [Term('major_axis', '>',
datetime.datetime(2000, 1, 2, 0, 0))])
expected = wp.loc[:, wp.major_axis > Timestamp('20000102')]
assert_panel_equal(result, expected)
result = store.select(
'wp', [Term('major_axis', '=',
[datetime.datetime(2000, 1, 2, 0, 0),
datetime.datetime(2000, 1, 3, 0, 0)])])
expected = wp.loc[:, [Timestamp('20000102'),
Timestamp('20000103')]]
assert_panel_equal(result, expected)
result = store.select('wp', [Term('minor_axis', '=', ['A', 'B'])])
expected = wp.loc[:, :, ['A', 'B']]
assert_panel_equal(result, expected)
def test_backwards_compat_without_term_object(self):
with ensure_clean_store(self.path) as store:
wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B', 'C', 'D'])
store.append('wp', wp)
with assert_produces_warning(expected_warning=FutureWarning,
check_stacklevel=False):
result = store.select('wp', [('major_axis>20000102'),
('minor_axis', '=', ['A', 'B'])])
expected = wp.loc[:,
wp.major_axis > Timestamp('20000102'),
['A', 'B']]
assert_panel_equal(result, expected)
store.remove('wp', ('major_axis>20000103'))
result = store.select('wp')
expected = wp.loc[:, wp.major_axis <= Timestamp('20000103'), :]
assert_panel_equal(result, expected)
with ensure_clean_store(self.path) as store:
wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B', 'C', 'D'])
store.append('wp', wp)
            # datetime objects in the where terms
with assert_produces_warning(expected_warning=FutureWarning,
check_stacklevel=False):
result = store.select('wp',
[('major_axis',
'>',
datetime.datetime(2000, 1, 2))])
expected = wp.loc[:, wp.major_axis > Timestamp('20000102')]
assert_panel_equal(result, expected)
with assert_produces_warning(expected_warning=FutureWarning,
check_stacklevel=False):
result = store.select('wp',
[('major_axis',
'>',
datetime.datetime(2000, 1, 2, 0, 0))])
expected = wp.loc[:, wp.major_axis > Timestamp('20000102')]
assert_panel_equal(result, expected)
with assert_produces_warning(expected_warning=FutureWarning,
check_stacklevel=False):
result = store.select('wp',
[('major_axis',
'=',
[datetime.datetime(2000, 1, 2, 0, 0),
datetime.datetime(2000, 1, 3, 0, 0)])]
)
expected = wp.loc[:, [Timestamp('20000102'),
Timestamp('20000103')]]
assert_panel_equal(result, expected)
def test_same_name_scoping(self):
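        # names in a where expression resolve against the enclosing scope,
        # even when 'datetime' is rebound by a later import.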
with ensure_clean_store(self.path) as store:
import pandas as pd
df = DataFrame(np.random.randn(20, 2),
index=pd.date_range('20130101', periods=20))
store.put('df', df, format='table')
expected = df[df.index > pd.Timestamp('20130105')]
import datetime # noqa
result = store.select('df', 'index>datetime.datetime(2013,1,5)')
assert_frame_equal(result, expected)
from datetime import datetime # noqa
# technically an error, but allow it
result = store.select('df', 'index>datetime.datetime(2013,1,5)')
assert_frame_equal(result, expected)
result = store.select('df', 'index>datetime(2013,1,5)')
assert_frame_equal(result, expected)
def test_series(self):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object),
dtype=object))
self._check_roundtrip(ts3, tm.assert_series_equal,
check_index_type=False)
def test_sparse_series(self):
s = tm.makeStringSeries()
s[3:5] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_series_equal,
check_series_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_series_equal,
check_series_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_series_equal,
check_series_type=True)
def test_sparse_frame(self):
s = tm.makeDataFrame()
s.ix[3:5, 1:3] = np.nan
s.ix[8:10, -2] = np.nan
ss = s.to_sparse()
self._check_double_roundtrip(ss, tm.assert_frame_equal,
check_frame_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_double_roundtrip(ss2, tm.assert_frame_equal,
check_frame_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_double_roundtrip(ss3, tm.assert_frame_equal,
check_frame_type=True)
def test_sparse_panel(self):
items = ['x', 'y', 'z']
p = Panel(dict((i, tm.makeDataFrame().ix[:2, :2]) for i in items))
sp = p.to_sparse()
self._check_double_roundtrip(sp, assert_panel_equal,
check_panel_type=True)
sp2 = p.to_sparse(kind='integer')
self._check_double_roundtrip(sp2, assert_panel_equal,
check_panel_type=True)
sp3 = p.to_sparse(fill_value=0)
self._check_double_roundtrip(sp3, assert_panel_equal,
check_panel_type=True)
def test_float_index(self):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal)
def test_tuple_index(self):
# GH #492
col = np.arange(10)
idx = [(0., 1.), (2., 3.), (4., 5.)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
expected_warning = Warning if PY35 else PerformanceWarning
with tm.assert_produces_warning(expected_warning=expected_warning,
check_stacklevel=False):
self._check_roundtrip(DF, tm.assert_frame_equal)
def test_index_types(self):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(l, r,
check_dtype=True,
check_index_type=True,
check_series_type=True)
# nose has a deprecation warning in 3.5
expected_warning = Warning if PY35 else PerformanceWarning
with tm.assert_produces_warning(expected_warning=expected_warning,
check_stacklevel=False):
ser = Series(values, [0, 'y'])
self._check_roundtrip(ser, func)
with tm.assert_produces_warning(expected_warning=expected_warning,
check_stacklevel=False):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func)
with tm.assert_produces_warning(expected_warning=expected_warning,
check_stacklevel=False):
ser = Series(values, ['y', 0])
self._check_roundtrip(ser, func)
with tm.assert_produces_warning(expected_warning=expected_warning,
check_stacklevel=False):
ser = Series(values, [datetime.date.today(), 'a'])
self._check_roundtrip(ser, func)
with tm.assert_produces_warning(expected_warning=expected_warning,
check_stacklevel=False):
ser = Series(values, [1.23, 'b'])
self._check_roundtrip(ser, func)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func)
ser = Series(values, [datetime.datetime(
2012, 1, 1), datetime.datetime(2012, 1, 2)])
self._check_roundtrip(ser, func)
def test_timeseries_preepoch(self):
if sys.version_info[0] == 2 and sys.version_info[1] < 7:
raise nose.SkipTest("won't work on Python < 2.7")
dr = bdate_range('1/1/1940', '1/1/1960')
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal)
except OverflowError:
            raise nose.SkipTest('known failure on some Windows platforms')
def test_frame(self):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(df, tm.assert_frame_equal)
self._check_roundtrip(df, tm.assert_frame_equal)
if not skip_compression:
self._check_roundtrip_table(df, tm.assert_frame_equal,
compression=True)
self._check_roundtrip(df, tm.assert_frame_equal,
compression=True)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(tdf, tm.assert_frame_equal)
if not skip_compression:
self._check_roundtrip(tdf, tm.assert_frame_equal,
compression=True)
with ensure_clean_store(self.path) as store:
# not consolidated
df['foo'] = np.random.randn(len(df))
store['df'] = df
recons = store['df']
self.assertTrue(recons._data.is_consolidated())
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal)
def test_empty_series_frame(self):
s0 = Series()
s1 = Series(name='myseries')
df0 = DataFrame()
df1 = DataFrame(index=['a', 'b', 'c'])
df2 = DataFrame(columns=['d', 'e', 'f'])
self._check_roundtrip(s0, tm.assert_series_equal)
self._check_roundtrip(s1, tm.assert_series_equal)
self._check_roundtrip(df0, tm.assert_frame_equal)
self._check_roundtrip(df1, tm.assert_frame_equal)
self._check_roundtrip(df2, tm.assert_frame_equal)
def test_empty_series(self):
for dtype in [np.int64, np.float64, np.object, 'm8[ns]', 'M8[ns]']:
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal)
def test_can_serialize_dates(self):
rng = [x.date() for x in bdate_range('1/1/2000', '1/30/2000')]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal)
def test_store_hierarchical(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo', 'bar'])
frame = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
self._check_roundtrip(frame, tm.assert_frame_equal)
self._check_roundtrip(frame.T, tm.assert_frame_equal)
self._check_roundtrip(frame['A'], tm.assert_series_equal)
# check that the names are stored
with ensure_clean_store(self.path) as store:
store['frame'] = frame
recons = store['frame']
assert(recons.index.names == ('foo', 'bar'))
def test_store_index_name(self):
df = tm.makeDataFrame()
df.index.name = 'foo'
with ensure_clean_store(self.path) as store:
store['frame'] = df
recons = store['frame']
assert(recons.index.name == 'foo')
def test_store_series_name(self):
df = tm.makeDataFrame()
series = df['A']
with ensure_clean_store(self.path) as store:
store['series'] = series
recons = store['series']
assert(recons.name == 'A')
def test_store_mixed(self):
def _make_one():
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['int1'] = 1
df['int2'] = 2
return df.consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal)
self._check_roundtrip(df2, tm.assert_frame_equal)
with ensure_clean_store(self.path) as store:
store['obj'] = df1
tm.assert_frame_equal(store['obj'], df1)
store['obj'] = df2
tm.assert_frame_equal(store['obj'], df2)
# check that can store Series of all of these types
self._check_roundtrip(df1['obj1'], tm.assert_series_equal)
self._check_roundtrip(df1['bool1'], tm.assert_series_equal)
self._check_roundtrip(df1['int1'], tm.assert_series_equal)
if not skip_compression:
self._check_roundtrip(df1['obj1'], tm.assert_series_equal,
compression=True)
self._check_roundtrip(df1['bool1'], tm.assert_series_equal,
compression=True)
self._check_roundtrip(df1['int1'], tm.assert_series_equal,
compression=True)
self._check_roundtrip(df1, tm.assert_frame_equal,
compression=True)
def test_wide(self):
wp = tm.makePanel()
self._check_roundtrip(wp, assert_panel_equal)
def test_wide_table(self):
wp = tm.makePanel()
self._check_roundtrip_table(wp, assert_panel_equal)
def test_select_with_dups(self):
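        # duplicate column labels must survive select(), including
        # column-subset selection and repeated appends.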
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df.index = date_range('20130101 9:30', periods=10, freq='T')
with ensure_clean_store(self.path) as store:
store.append('df', df)
result = store.select('df')
expected = df
assert_frame_equal(result, expected, by_blocks=True)
result = store.select('df', columns=df.columns)
expected = df
assert_frame_equal(result, expected, by_blocks=True)
result = store.select('df', columns=['A'])
expected = df.loc[:, ['A']]
assert_frame_equal(result, expected)
        # dups across dtypes
df = concat([DataFrame(np.random.randn(10, 4),
columns=['A', 'A', 'B', 'B']),
DataFrame(np.random.randint(0, 10, size=20)
.reshape(10, 2),
columns=['A', 'C'])],
axis=1)
df.index = date_range('20130101 9:30', periods=10, freq='T')
with ensure_clean_store(self.path) as store:
store.append('df', df)
result = store.select('df')
expected = df
assert_frame_equal(result, expected, by_blocks=True)
result = store.select('df', columns=df.columns)
expected = df
assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ['A']]
result = store.select('df', columns=['A'])
assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ['B', 'A']]
result = store.select('df', columns=['B', 'A'])
assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(self.path) as store:
store.append('df', df)
store.append('df', df)
expected = df.loc[:, ['B', 'A']]
expected = concat([expected, expected])
result = store.select('df', columns=['B', 'A'])
assert_frame_equal(result, expected, by_blocks=True)
def test_wide_table_dups(self):
wp = tm.makePanel()
with ensure_clean_store(self.path) as store:
store.put('panel', wp, format='table')
store.put('panel', wp, format='table', append=True)
with tm.assert_produces_warning(expected_warning=DuplicateWarning):
recons = store['panel']
assert_panel_equal(recons, wp)
def test_long(self):
def _check(left, right):
assert_panel_equal(left.to_panel(), right.to_panel())
wp = tm.makePanel()
self._check_roundtrip(wp.to_frame(), _check)
# empty
# self._check_roundtrip(wp.to_frame()[:0], _check)
def test_longpanel(self):
pass
def test_overwrite_node(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store['a'] = ts
tm.assert_series_equal(store['a'], ts)
def test_sparse_with_compression(self):
# GH 2931
# make sparse dataframe
df = DataFrame(np.random.binomial(
            n=1, p=.01, size=(int(1e3), 10))).to_sparse(fill_value=0)
# case 1: store uncompressed
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression=False,
check_frame_type=True)
# case 2: store compressed (works)
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression='zlib',
check_frame_type=True)
# set one series to be completely sparse
        df[0] = np.zeros(int(1e3))
# case 3: store df with completely sparse series uncompressed
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression=False,
check_frame_type=True)
# case 4: try storing df with completely sparse series compressed
# (fails)
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression='zlib',
check_frame_type=True)
def test_select(self):
wp = tm.makePanel()
with ensure_clean_store(self.path) as store:
# put/select ok
_maybe_remove(store, 'wp')
store.put('wp', wp, format='table')
store.select('wp')
# non-table ok (where = None)
_maybe_remove(store, 'wp')
store.put('wp2', wp)
store.select('wp2')
# selection on the non-indexable with a large number of columns
wp = Panel(np.random.randn(100, 100, 100),
items=['Item%03d' % i for i in range(100)],
major_axis=date_range('1/1/2000', periods=100),
minor_axis=['E%03d' % i for i in range(100)])
_maybe_remove(store, 'wp')
store.append('wp', wp)
items = ['Item%03d' % i for i in range(80)]
result = store.select('wp', Term('items=items'))
expected = wp.reindex(items=items)
assert_panel_equal(expected, result)
            # selecting a non-table with a where
# self.assertRaises(ValueError, store.select,
# 'wp2', ('column', ['A', 'D']))
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df')
store.append('df', df)
result = store.select('df', columns=['A', 'B'])
expected = df.reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
            # equivalently
result = store.select('df', [("columns=['A', 'B']")])
expected = df.reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['A'])
result = store.select('df', ['A > 0'], columns=['A', 'B'])
expected = df[df.A > 0].reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
            # all columns as data columns
_maybe_remove(store, 'df')
store.append('df', df, data_columns=True)
result = store.select('df', ['A > 0'], columns=['A', 'B'])
expected = df[df.A > 0].reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['A'])
result = store.select('df', ['A > 0'], columns=['C', 'D'])
expected = df[df.A > 0].reindex(columns=['C', 'D'])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(self):
with ensure_clean_store(self.path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(dict(
ts=bdate_range('2012-01-01', periods=300),
A=np.random.randn(300)))
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['ts', 'A'])
result = store.select('df', [Term("ts>=Timestamp('2012-02-01')")])
expected = df[df.ts >= Timestamp('2012-02-01')]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=['A', 'B'])
df['object'] = 'foo'
df.ix[4:5, 'object'] = 'bar'
df['boolv'] = df['A'] > 0
_maybe_remove(store, 'df')
store.append('df', df, data_columns=True)
expected = (df[df.boolv == True] # noqa
.reindex(columns=['A', 'boolv']))
for v in [True, 'true', 1]:
result = store.select('df', Term(
'boolv == %s' % str(v)), columns=['A', 'boolv'])
tm.assert_frame_equal(expected, result)
expected = (df[df.boolv == False] # noqa
.reindex(columns=['A', 'boolv']))
for v in [False, 'false', 0]:
result = store.select('df', Term(
'boolv == %s' % str(v)), columns=['A', 'boolv'])
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, 'df_int')
store.append('df_int', df)
result = store.select(
'df_int', [Term("index<10"), Term("columns=['A']")])
expected = df.reindex(index=list(df.index)[0:10], columns=['A'])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(dict(A=np.random.rand(
20), B=np.random.rand(20), index=np.arange(20, dtype='f8')))
_maybe_remove(store, 'df_float')
store.append('df_float', df)
result = store.select(
'df_float', [Term("index<10.0"), Term("columns=['A']")])
expected = df.reindex(index=list(df.index)[0:10], columns=['A'])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(self.path) as store:
# floats w/o NaN
df = DataFrame(
dict(cols=range(11), values=range(11)), dtype='float64')
df['cols'] = (df['cols'] + 10).apply(str)
store.append('df1', df, data_columns=True)
result = store.select(
'df1', where='values>2.0')
expected = df[df['values'] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df['values'] > 2.0]
store.append('df2', df, data_columns=True, index=False)
result = store.select(
'df2', where='values>2.0')
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
            # floats with a NaN not in the first position are ok too
df = DataFrame(
dict(cols=range(11), values=range(11)), dtype='float64')
df['cols'] = (df['cols'] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df['values'] > 2.0]
store.append('df4', df, data_columns=True)
result = store.select(
'df4', where='values>2.0')
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(self.path) as store:
df = tm.makeDataFrame()
expected = df[df['A'] > 0]
store.append('df', df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select('df', where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self):
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(ts=bdate_range('2012-01-01', periods=300),
A=np.random.randn(300),
B=range(300),
users=['a'] * 50 + ['b'] * 50 + ['c'] * 100 +
['a%03d' % i for i in range(100)]))
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['ts', 'A', 'B', 'users'])
# regular select
result = store.select('df', [Term("ts>=Timestamp('2012-02-01')")])
expected = df[df.ts >= Timestamp('2012-02-01')]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select(
'df', [Term("ts>=Timestamp('2012-02-01') & "
"users=['a','b','c']")])
expected = df[(df.ts >= Timestamp('2012-02-01')) &
df.users.isin(['a', 'b', 'c'])]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ['a', 'b', 'c'] + ['a%03d' % i for i in range(60)]
result = store.select(
'df', [Term("ts>=Timestamp('2012-02-01')"),
Term('users=selector')])
expected = df[(df.ts >= Timestamp('2012-02-01')) &
df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select('df', [Term('B=selector')])
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
self.assertEqual(len(result), 100)
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select('df', [Term('ts=selector')])
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
self.assertEqual(len(result), 100)
def test_select_iterator(self):
# single table
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, 'df')
store.append('df', df)
expected = store.select('df')
results = [s for s in store.select('df', iterator=True)]
result = concat(results)
tm.assert_frame_equal(expected, result)
results = [s for s in store.select('df', chunksize=100)]
self.assertEqual(len(results), 5)
result = concat(results)
tm.assert_frame_equal(expected, result)
results = [s for s in store.select('df', chunksize=150)]
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(self.path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, 'df_non_table')
self.assertRaises(TypeError, read_hdf, path,
'df_non_table', chunksize=100)
self.assertRaises(TypeError, read_hdf, path,
'df_non_table', iterator=True)
with ensure_clean_path(self.path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, 'df', format='table')
results = [s for s in read_hdf(path, 'df', chunksize=100)]
result = concat(results)
self.assertEqual(len(results), 5)
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, 'df'))
# multiple
with ensure_clean_store(self.path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append('df1', df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(
columns=lambda x: "%s_2" % x)
df2['foo'] = 'bar'
store.append('df2', df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(
['df1', 'df2'], selector='df1')
results = [s for s in store.select_as_multiple(
['df1', 'df2'], selector='df1', chunksize=150)]
result = concat(results)
tm.assert_frame_equal(expected, result)
# where selection
# expected = store.select_as_multiple(
# ['df1', 'df2'], where= Term('A>0'), selector='df1')
# results = []
# for s in store.select_as_multiple(
# ['df1', 'df2'], where= Term('A>0'), selector='df1',
# chunksize=25):
# results.append(s)
# result = concat(results)
# tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(self):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100064, 'S')
_maybe_remove(store, 'df')
store.append('df', expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select('df')
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = "index >= '%s'" % beg_dt
result = store.select('df', where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = "index <= '%s'" % end_dt
result = store.select('df', where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
result = store.select('df', where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100064, 'S')
_maybe_remove(store, 'df')
store.append('df', expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = [s for s in store.select('df', chunksize=chunksize)]
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = "index >= '%s'" % beg_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '%s'" % end_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(self):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100064, 'S')
_maybe_remove(store, 'df')
store.append('df', expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = "index >= '%s'" % beg_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '%s'" % end_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) &
(expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
        # with iterator, where clause that selects nothing
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100064, 'S')
_maybe_remove(store, 'df')
store.append('df', expected)
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
where = "index > '%s'" % end_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
self.assertEqual(0, len(results))
def test_select_iterator_many_empty_frames(self):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = int(1e4)
# with iterator, range limited to the first chunk
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100000, 'S')
_maybe_remove(store, 'df')
store.append('df', expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = "index >= '%s'" % beg_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '%s'" % end_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
tm.assert_equal(1, len(results))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
            # should be exactly one chunk
tm.assert_equal(1, len(results))
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) &
(expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause which selects
# *nothing*.
#
            # To be consistent with Python iteration idioms this should
            # return [], so that iterating over the result yields nothing.
where = "index <= '%s' & index >= '%s'" % (beg_dt, end_dt)
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
# should be []
tm.assert_equal(0, len(results))
def test_retain_index_attributes(self):
# GH 3499, losing frequency info on index recreation
df = DataFrame(dict(
A=Series(lrange(3),
index=date_range('2000-1-1', periods=3, freq='H'))))
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'data')
store.put('data', df, format='table')
result = store.get('data')
tm.assert_frame_equal(df, result)
for attr in ['freq', 'tz', 'name']:
for idx in ['index', 'columns']:
self.assertEqual(getattr(getattr(df, idx), attr, None),
getattr(getattr(result, idx), attr, None))
# try to append a table with a different frequency
with tm.assert_produces_warning(
expected_warning=AttributeConflictWarning):
df2 = DataFrame(dict(
A=Series(lrange(3),
index=date_range('2002-1-1',
periods=3, freq='D'))))
store.append('data', df2)
self.assertIsNone(store.get_storer('data').info['index']['freq'])
# this is ok
_maybe_remove(store, 'df2')
df2 = DataFrame(dict(
A=Series(lrange(3),
index=[Timestamp('20010101'), Timestamp('20010102'),
Timestamp('20020101')])))
store.append('df2', df2)
df3 = DataFrame(dict(
A=Series(lrange(3),
index=date_range('2002-1-1', periods=3,
freq='D'))))
store.append('df2', df3)
def test_retain_index_attributes2(self):
with ensure_clean_path(self.path) as path:
expected_warning = Warning if PY35 else AttributeConflictWarning
with tm.assert_produces_warning(expected_warning=expected_warning,
check_stacklevel=False):
df = DataFrame(dict(
A=Series(lrange(3),
index=date_range('2000-1-1',
periods=3, freq='H'))))
df.to_hdf(path, 'data', mode='w', append=True)
df2 = DataFrame(dict(
A=Series(lrange(3),
index=date_range('2002-1-1', periods=3,
freq='D'))))
df2.to_hdf(path, 'data', append=True)
idx = date_range('2000-1-1', periods=3, freq='H')
idx.name = 'foo'
df = DataFrame(dict(A=Series(lrange(3), index=idx)))
df.to_hdf(path, 'data', mode='w', append=True)
self.assertEqual(read_hdf(path, 'data').index.name, 'foo')
with tm.assert_produces_warning(expected_warning=expected_warning,
check_stacklevel=False):
idx2 = date_range('2001-1-1', periods=3, freq='H')
idx2.name = 'bar'
df2 = DataFrame(dict(A=Series(lrange(3), index=idx2)))
df2.to_hdf(path, 'data', append=True)
self.assertIsNone(read_hdf(path, 'data').index.name)
def test_panel_select(self):
wp = tm.makePanel()
with ensure_clean_store(self.path) as store:
store.put('wp', wp, format='table')
date = wp.major_axis[len(wp.major_axis) // 2]
crit1 = ('major_axis>=date')
crit2 = ("minor_axis=['A', 'D']")
result = store.select('wp', [crit1, crit2])
expected = wp.truncate(before=date).reindex(minor=['A', 'D'])
assert_panel_equal(result, expected)
result = store.select(
'wp', ['major_axis>="20000124"', ("minor_axis=['A', 'B']")])
expected = wp.truncate(before='20000124').reindex(minor=['A', 'B'])
assert_panel_equal(result, expected)
def test_frame_select(self):
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
store.put('frame', df, format='table')
date = df.index[len(df) // 2]
crit1 = Term('index>=date')
self.assertEqual(crit1.env.scope['date'], date)
crit2 = ("columns=['A', 'D']")
crit3 = ('columns=A')
result = store.select('frame', [crit1, crit2])
expected = df.ix[date:, ['A', 'D']]
tm.assert_frame_equal(result, expected)
result = store.select('frame', [crit3])
expected = df.ix[:, ['A']]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append('df_time', df)
self.assertRaises(
ValueError, store.select, 'df_time', [Term("index>0")])
# can't select if not written as table
# store['frame'] = df
# self.assertRaises(ValueError, store.select,
# 'frame', [crit1, crit2])
def test_frame_select_complex(self):
# select via complex criteria
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df.loc[df.index[0:4], 'string'] = 'bar'
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table', data_columns=['string'])
# empty
result = store.select('df', 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index > df.index[3]) & (df.string == 'bar')]
tm.assert_frame_equal(result, expected)
result = store.select('df', 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index > df.index[3]) & (df.string == 'foo')]
tm.assert_frame_equal(result, expected)
# or
result = store.select('df', 'index>df.index[3] | string="bar"')
expected = df.loc[(df.index > df.index[3]) | (df.string == 'bar')]
tm.assert_frame_equal(result, expected)
result = store.select('df', '(index>df.index[3] & '
'index<=df.index[6]) | string="bar"')
expected = df.loc[((df.index > df.index[3]) & (
df.index <= df.index[6])) | (df.string == 'bar')]
tm.assert_frame_equal(result, expected)
# invert
result = store.select('df', 'string!="bar"')
expected = df.loc[df.string != 'bar']
tm.assert_frame_equal(result, expected)
# invert not implemented in numexpr :(
self.assertRaises(NotImplementedError,
store.select, 'df', '~(string="bar")')
# invert ok for filters
result = store.select('df', "~(columns=['A','B'])")
expected = df.loc[:, df.columns.difference(['A', 'B'])]
tm.assert_frame_equal(result, expected)
# in
result = store.select(
'df', "index>df.index[3] & columns in ['A','B']")
expected = df.loc[df.index > df.index[3]].reindex(columns=[
'A', 'B'])
tm.assert_frame_equal(result, expected)
def test_frame_select_complex2(self):
with ensure_clean_path(['parms.hdf', 'hist.hdf']) as paths:
pp, hh = paths
# use non-trivial selection criteria
parms = DataFrame({'A': [1, 1, 2, 2, 3]})
parms.to_hdf(pp, 'df', mode='w',
format='table', data_columns=['A'])
selection = read_hdf(pp, 'df', where='A=[2,3]')
hist = DataFrame(np.random.randn(25, 1),
columns=['data'],
index=MultiIndex.from_tuples(
[(i, j) for i in range(5)
for j in range(5)],
names=['l1', 'l2']))
hist.to_hdf(hh, 'df', mode='w', format='table')
expected = read_hdf(hh, 'df', where=Term('l1', '=', [2, 3, 4]))
# list like
result = read_hdf(hh, 'df', where=Term(
'l1', '=', selection.index.tolist()))
assert_frame_equal(result, expected)
l = selection.index.tolist() # noqa
            # scope with a list-like
store = HDFStore(hh)
result = store.select('df', where='l1=l')
assert_frame_equal(result, expected)
store.close()
result = read_hdf(hh, 'df', where='l1=l')
assert_frame_equal(result, expected)
# index
index = selection.index # noqa
result = read_hdf(hh, 'df', where='l1=index')
assert_frame_equal(result, expected)
result = read_hdf(hh, 'df', where='l1=selection.index')
assert_frame_equal(result, expected)
result = read_hdf(hh, 'df', where='l1=selection.index.tolist()')
assert_frame_equal(result, expected)
result = read_hdf(hh, 'df', where='l1=list(selection.index)')
assert_frame_equal(result, expected)
            # scope with an index
store = HDFStore(hh)
result = store.select('df', where='l1=index')
assert_frame_equal(result, expected)
result = store.select('df', where='l1=selection.index')
assert_frame_equal(result, expected)
result = store.select('df', where='l1=selection.index.tolist()')
assert_frame_equal(result, expected)
result = store.select('df', where='l1=list(selection.index)')
assert_frame_equal(result, expected)
store.close()
def test_invalid_filtering(self):
# can't use more than one filter (atm)
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table')
# not implemented
self.assertRaises(NotImplementedError, store.select,
'df', "columns=['A'] | columns=['B']")
# in theory we could deal with this
self.assertRaises(NotImplementedError, store.select,
'df', "columns=['A','B'] & columns=['C']")
def test_string_select(self):
# GH 2973
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
# test string ==/!=
df['x'] = 'none'
df.ix[2:7, 'x'] = ''
store.append('df', df, data_columns=['x'])
result = store.select('df', Term('x=none'))
expected = df[df.x == 'none']
assert_frame_equal(result, expected)
try:
result = store.select('df', Term('x!=none'))
expected = df[df.x != 'none']
assert_frame_equal(result, expected)
except Exception as detail:
com.pprint_thing("[{0}]".format(detail))
com.pprint_thing(store)
com.pprint_thing(expected)
df2 = df.copy()
df2.loc[df2.x == '', 'x'] = np.nan
store.append('df2', df2, data_columns=['x'])
result = store.select('df2', Term('x!=none'))
expected = df2[isnull(df2.x)]
assert_frame_equal(result, expected)
# int ==/!=
df['int'] = 1
df.ix[2:7, 'int'] = 2
store.append('df3', df, data_columns=['int'])
result = store.select('df3', Term('int=2'))
expected = df[df.int == 2]
assert_frame_equal(result, expected)
result = store.select('df3', Term('int!=2'))
expected = df[df.int != 2]
assert_frame_equal(result, expected)
def test_read_column(self):
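        # select_column() returns an indexable or data column as a Series;
        # non data-indexable blocks and unknown columns raise.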
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
store.append('df', df)
# error
self.assertRaises(KeyError, store.select_column, 'df', 'foo')
def f():
store.select_column('df', 'index', where=['index>5'])
self.assertRaises(Exception, f)
# valid
result = store.select_column('df', 'index')
tm.assert_almost_equal(result.values, Series(df.index).values)
self.assertIsInstance(result, Series)
# not a data indexable column
self.assertRaises(
ValueError, store.select_column, 'df', 'values_block_0')
# a data column
df2 = df.copy()
df2['string'] = 'foo'
store.append('df2', df2, data_columns=['string'])
result = store.select_column('df2', 'string')
tm.assert_almost_equal(result.values, df2['string'].values)
# a data column with NaNs, result excludes the NaNs
df3 = df.copy()
df3['string'] = 'foo'
df3.ix[4:6, 'string'] = np.nan
store.append('df3', df3, data_columns=['string'])
result = store.select_column('df3', 'string')
tm.assert_almost_equal(result.values, df3['string'].values)
# start/stop
result = store.select_column('df3', 'string', start=2)
tm.assert_almost_equal(result.values, df3['string'].values[2:])
result = store.select_column('df3', 'string', start=-2)
tm.assert_almost_equal(result.values, df3['string'].values[-2:])
result = store.select_column('df3', 'string', stop=2)
tm.assert_almost_equal(result.values, df3['string'].values[:2])
result = store.select_column('df3', 'string', stop=-2)
tm.assert_almost_equal(result.values, df3['string'].values[:-2])
result = store.select_column('df3', 'string', start=2, stop=-2)
tm.assert_almost_equal(result.values, df3['string'].values[2:-2])
result = store.select_column('df3', 'string', start=-2, stop=2)
tm.assert_almost_equal(result.values, df3['string'].values[-2:2])
# GH 10392 - make sure column name is preserved
df4 = DataFrame({'A': np.random.randn(10), 'B': 'foo'})
store.append('df4', df4, data_columns=True)
expected = df4['B']
result = store.select_column('df4', 'B')
tm.assert_series_equal(result, expected)
def test_coordinates(self):
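        # select_as_coordinates() returns row locations that can be passed
        # back to select() as a where; arrays, masks and lists also work.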
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
store.append('df', df)
# all
c = store.select_as_coordinates('df')
assert((c.values == np.arange(len(df.index))).all())
# get coordinates back & test vs frame
_maybe_remove(store, 'df')
df = DataFrame(dict(A=lrange(5), B=lrange(5)))
store.append('df', df)
c = store.select_as_coordinates('df', ['index<3'])
assert((c.values == np.arange(3)).all())
result = store.select('df', where=c)
expected = df.ix[0:2, :]
tm.assert_frame_equal(result, expected)
c = store.select_as_coordinates('df', ['index>=3', 'index<=4'])
assert((c.values == np.arange(2) + 3).all())
result = store.select('df', where=c)
expected = df.ix[3:4, :]
tm.assert_frame_equal(result, expected)
self.assertIsInstance(c, Index)
# multiple tables
_maybe_remove(store, 'df1')
_maybe_remove(store, 'df2')
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
store.append('df1', df1, data_columns=['A', 'B'])
store.append('df2', df2)
c = store.select_as_coordinates('df1', ['A>0', 'B>0'])
df1_result = store.select('df1', c)
df2_result = store.select('df2', c)
result = concat([df1_result, df2_result], axis=1)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# pass array/mask as the coordinates
with ensure_clean_store(self.path) as store:
df = DataFrame(np.random.randn(1000, 2),
index=date_range('20000101', periods=1000))
store.append('df', df)
c = store.select_column('df', 'index')
where = c[DatetimeIndex(c).month == 5].index
expected = df.iloc[where]
# locations
result = store.select('df', where=where)
tm.assert_frame_equal(result, expected)
# boolean
result = store.select('df', where=where)
tm.assert_frame_equal(result, expected)
# invalid
self.assertRaises(ValueError, store.select, 'df',
where=np.arange(len(df), dtype='float64'))
self.assertRaises(ValueError, store.select, 'df',
where=np.arange(len(df) + 1))
self.assertRaises(ValueError, store.select, 'df',
where=np.arange(len(df)), start=5)
self.assertRaises(ValueError, store.select, 'df',
where=np.arange(len(df)), start=5, stop=10)
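# The rejected cases above: a float-dtype coordinate array, an array of the
# wrong length, and a coordinate array combined with start/stop.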
# selection with filter
selection = date_range('20000101', periods=500)
result = store.select('df', where='index in selection')
expected = df[df.index.isin(selection)]
tm.assert_frame_equal(result, expected)
# list
df = DataFrame(np.random.randn(10, 2))
store.append('df2', df)
result = store.select('df2', where=[0, 3, 5])
expected = df.iloc[[0, 3, 5]]
tm.assert_frame_equal(result, expected)
# boolean
where = [True] * 10
where[-2] = False
result = store.select('df2', where=where)
expected = df.loc[where]
tm.assert_frame_equal(result, expected)
# start/stop
result = store.select('df2', start=5, stop=10)
expected = df[5:10]
tm.assert_frame_equal(result, expected)
def test_append_to_multiple(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df2['foo'] = 'bar'
df = concat([df1, df2], axis=1)
with ensure_clean_store(self.path) as store:
# exceptions
self.assertRaises(ValueError, store.append_to_multiple,
{'df1': ['A', 'B'], 'df2': None}, df,
selector='df3')
self.assertRaises(ValueError, store.append_to_multiple,
{'df1': None, 'df2': None}, df, selector='df3')
self.assertRaises(
ValueError, store.append_to_multiple, 'df1', df, 'df1')
# regular operation
store.append_to_multiple(
{'df1': ['A', 'B'], 'df2': None}, df, selector='df1')
result = store.select_as_multiple(
['df1', 'df2'], where=['A>0', 'B>0'], selector='df1')
expected = df[(df.A > 0) & (df.B > 0)]
tm.assert_frame_equal(result, expected)
def test_append_to_multiple_dropna(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df1.ix[1, ['A', 'B']] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(self.path) as store:
# dropna=True should guarantee rows are synchronized
store.append_to_multiple(
{'df1': ['A', 'B'], 'df2': None}, df, selector='df1',
dropna=True)
result = store.select_as_multiple(['df1', 'df2'])
expected = df.dropna()
tm.assert_frame_equal(result, expected)
tm.assert_index_equal(store.select('df1').index,
store.select('df2').index)
# dropna=False shouldn't synchronize row indexes
store.append_to_multiple(
{'df1': ['A', 'B'], 'df2': None}, df, selector='df1',
dropna=False)
self.assertRaises(
ValueError, store.select_as_multiple, ['df1', 'df2'])
assert not store.select('df1').index.equals(
store.select('df2').index)
def test_select_as_multiple(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df2['foo'] = 'bar'
with ensure_clean_store(self.path) as store:
# no tables stored
self.assertRaises(Exception, store.select_as_multiple,
None, where=['A>0', 'B>0'], selector='df1')
store.append('df1', df1, data_columns=['A', 'B'])
store.append('df2', df2)
# exceptions
self.assertRaises(Exception, store.select_as_multiple,
None, where=['A>0', 'B>0'], selector='df1')
self.assertRaises(Exception, store.select_as_multiple,
[None], where=['A>0', 'B>0'], selector='df1')
self.assertRaises(KeyError, store.select_as_multiple,
['df1', 'df3'], where=['A>0', 'B>0'],
selector='df1')
self.assertRaises(KeyError, store.select_as_multiple,
['df3'], where=['A>0', 'B>0'], selector='df1')
self.assertRaises(KeyError, store.select_as_multiple,
['df1', 'df2'], where=['A>0', 'B>0'],
selector='df4')
# default select
result = store.select('df1', ['A>0', 'B>0'])
expected = store.select_as_multiple(
['df1'], where=['A>0', 'B>0'], selector='df1')
tm.assert_frame_equal(result, expected)
expected = store.select_as_multiple(
'df1', where=['A>0', 'B>0'], selector='df1')
tm.assert_frame_equal(result, expected)
# multiple
result = store.select_as_multiple(
['df1', 'df2'], where=['A>0', 'B>0'], selector='df1')
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# multiple (diff selector)
result = store.select_as_multiple(['df1', 'df2'], where=[Term(
'index>df2.index[4]')], selector='df2')
expected = concat([df1, df2], axis=1)
expected = expected[5:]
tm.assert_frame_equal(result, expected)
# test exception for diff rows
store.append('df3', tm.makeTimeDataFrame(nper=50))
self.assertRaises(ValueError, store.select_as_multiple,
['df1', 'df3'], where=['A>0', 'B>0'],
selector='df1')
def test_nan_selection_bug_4858(self):
# GH 4858; nan selection bug, only works for pytables >= 3.1
if LooseVersion(tables.__version__) < '3.1.0':
raise nose.SkipTest('tables version does not support fix for nan '
'selection bug: GH 4858')
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(cols=range(6), values=range(6)),
dtype='float64')
df['cols'] = (df['cols'] + 10).apply(str)
df.iloc[0] = np.nan
expected = DataFrame(dict(cols=['13.0', '14.0', '15.0'], values=[
3., 4., 5.]), index=[3, 4, 5])
# write w/o the index on that particular column
store.append('df', df, data_columns=True, index=['cols'])
result = store.select('df', where='values>2.0')
assert_frame_equal(result, expected)
def test_start_stop(self):
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
store.append('df', df)
result = store.select(
'df', [Term("columns=['A']")], start=0, stop=5)
expected = df.ix[0:4, ['A']]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select(
'df', [Term("columns=['A']")], start=30, stop=40)
assert(len(result) == 0)
assert(type(result) == DataFrame)
def test_select_filter_corner(self):
df = DataFrame(np.random.randn(50, 100))
df.index = ['%.3d' % c for c in df.index]
df.columns = ['%.3d' % c for c in df.columns]
with ensure_clean_store(self.path) as store:
store.put('frame', df, format='table')
crit = Term('columns=df.columns[:75]')
result = store.select('frame', [crit])
tm.assert_frame_equal(result, df.ix[:, df.columns[:75]])
crit = Term('columns=df.columns[:75:2]')
result = store.select('frame', [crit])
tm.assert_frame_equal(result, df.ix[:, df.columns[:75:2]])
def _check_roundtrip(self, obj, comparator, compression=False, **kwargs):
options = {}
if compression:
options['complib'] = _default_compressor
with ensure_clean_store(self.path, 'w', **options) as store:
store['obj'] = obj
retrieved = store['obj']
comparator(retrieved, obj, **kwargs)
def _check_double_roundtrip(self, obj, comparator, compression=False,
**kwargs):
options = {}
if compression:
options['complib'] = compression or _default_compressor
with ensure_clean_store(self.path, 'w', **options) as store:
store['obj'] = obj
retrieved = store['obj']
comparator(retrieved, obj, **kwargs)
store['obj'] = retrieved
again = store['obj']
comparator(again, obj, **kwargs)
def _check_roundtrip_table(self, obj, comparator, compression=False):
options = {}
if compression:
options['complib'] = _default_compressor
with ensure_clean_store(self.path, 'w', **options) as store:
store.put('obj', obj, format='table')
retrieved = store['obj']
# sorted_obj = _test_sort(obj)
comparator(retrieved, obj)
def test_multiple_open_close(self):
# GH 4409, open & close multiple times
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, 'df', mode='w', format='table')
# single
store = HDFStore(path)
self.assertNotIn('CLOSED', str(store))
self.assertTrue(store.is_open)
store.close()
self.assertIn('CLOSED', str(store))
self.assertFalse(store.is_open)
with ensure_clean_path(self.path) as path:
if pytables._table_file_open_policy_is_strict:
# multiples
store1 = HDFStore(path)
def f():
HDFStore(path)
self.assertRaises(ValueError, f)
store1.close()
else:
# multiples
store1 = HDFStore(path)
store2 = HDFStore(path)
self.assertNotIn('CLOSED', str(store1))
self.assertNotIn('CLOSED', str(store2))
self.assertTrue(store1.is_open)
self.assertTrue(store2.is_open)
store1.close()
self.assertIn('CLOSED', str(store1))
self.assertFalse(store1.is_open)
self.assertNotIn('CLOSED', str(store2))
self.assertTrue(store2.is_open)
store2.close()
self.assertIn('CLOSED', str(store1))
self.assertIn('CLOSED', str(store2))
self.assertFalse(store1.is_open)
self.assertFalse(store2.is_open)
# nested close
store = HDFStore(path, mode='w')
store.append('df', df)
store2 = HDFStore(path)
store2.append('df2', df)
store2.close()
self.assertIn('CLOSED', str(store2))
self.assertFalse(store2.is_open)
store.close()
self.assertIn('CLOSED', str(store))
self.assertFalse(store.is_open)
# double closing
store = HDFStore(path, mode='w')
store.append('df', df)
store2 = HDFStore(path)
store.close()
self.assertIn('CLOSED', str(store))
self.assertFalse(store.is_open)
store2.close()
self.assertIn('CLOSED', str(store2))
self.assertFalse(store2.is_open)
# ops on a closed store
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, 'df', mode='w', format='table')
store = HDFStore(path)
store.close()
self.assertRaises(ClosedFileError, store.keys)
self.assertRaises(ClosedFileError, lambda: 'df' in store)
self.assertRaises(ClosedFileError, lambda: len(store))
self.assertRaises(ClosedFileError, lambda: store['df'])
self.assertRaises(ClosedFileError, lambda: store.df)
self.assertRaises(ClosedFileError, store.select, 'df')
self.assertRaises(ClosedFileError, store.get, 'df')
self.assertRaises(ClosedFileError, store.append, 'df2', df)
self.assertRaises(ClosedFileError, store.put, 'df3', df)
self.assertRaises(ClosedFileError, store.get_storer, 'df2')
self.assertRaises(ClosedFileError, store.remove, 'df2')
def f():
store.select('df')
tm.assertRaisesRegexp(ClosedFileError, 'file is not open', f)
def test_pytables_native_read(self):
with ensure_clean_store(
tm.get_data_path('legacy_hdf/pytables_native.h5'),
mode='r') as store:
d2 = store['detector/readout']
self.assertIsInstance(d2, DataFrame)
def test_pytables_native2_read(self):
# fails on win/3.5 oddly
if PY35 and is_platform_windows():
raise nose.SkipTest("native2 read fails oddly on windows / 3.5")
with ensure_clean_store(
tm.get_data_path('legacy_hdf/pytables_native2.h5'),
mode='r') as store:
str(store)
d1 = store['detector']
self.assertIsInstance(d1, DataFrame)
def test_legacy_read(self):
with ensure_clean_store(
tm.get_data_path('legacy_hdf/legacy.h5'),
mode='r') as store:
store['a']
store['b']
store['c']
store['d']
def test_legacy_table_read(self):
# legacy table types
with ensure_clean_store(
tm.get_data_path('legacy_hdf/legacy_table.h5'),
mode='r') as store:
store.select('df1')
store.select('df2')
store.select('wp1')
# force the frame
store.select('df2', typ='legacy_frame')
# old version warning
with tm.assert_produces_warning(
expected_warning=IncompatibilityWarning):
self.assertRaises(
Exception, store.select, 'wp1', Term('minor_axis=B'))
df2 = store.select('df2')
result = store.select('df2', Term('index>df2.index[2]'))
expected = df2[df2.index > df2.index[2]]
assert_frame_equal(expected, result)
def test_legacy_0_10_read(self):
# legacy from 0.10
with ensure_clean_store(
tm.get_data_path('legacy_hdf/legacy_0.10.h5'),
mode='r') as store:
str(store)
for k in store.keys():
store.select(k)
def test_legacy_0_11_read(self):
# legacy from 0.11
path = os.path.join('legacy_hdf', 'legacy_table_0.11.h5')
with ensure_clean_store(tm.get_data_path(path), mode='r') as store:
str(store)
assert 'df' in store
assert 'df1' in store
assert 'mi' in store
df = store.select('df')
df1 = store.select('df1')
mi = store.select('mi')
assert isinstance(df, DataFrame)
assert isinstance(df1, DataFrame)
assert isinstance(mi, DataFrame)
def test_copy(self):
def do_copy(f=None, new_f=None, keys=None, propindexes=True, **kwargs):
try:
if f is None:
f = tm.get_data_path(os.path.join('legacy_hdf',
'legacy_0.10.h5'))
store = HDFStore(f, 'r')
if new_f is None:
import tempfile
fd, new_f = tempfile.mkstemp()
tstore = store.copy(
new_f, keys=keys, propindexes=propindexes, **kwargs)
# check keys
if keys is None:
keys = store.keys()
self.assertEqual(set(keys), set(tstore.keys()))
# check indices & nrows
for k in tstore.keys():
if tstore.get_storer(k).is_table:
new_t = tstore.get_storer(k)
orig_t = store.get_storer(k)
self.assertEqual(orig_t.nrows, new_t.nrows)
# check propindexes
if propindexes:
for a in orig_t.axes:
if a.is_indexed:
self.assertTrue(new_t[a.name].is_indexed)
finally:
safe_close(store)
safe_close(tstore)
try:
os.close(fd)
except:
pass
safe_remove(new_f)
do_copy()
do_copy(keys=['/a', '/b', '/df1_mixed'])
do_copy(propindexes=False)
# new table
df = tm.makeDataFrame()
try:
path = create_tempfile(self.path)
st = HDFStore(path)
st.append('df', df, data_columns=['A'])
st.close()
do_copy(f=path)
do_copy(f=path, propindexes=False)
finally:
safe_remove(path)
def test_legacy_table_write(self):
raise nose.SkipTest("cannot write legacy tables")
store = HDFStore(tm.get_data_path(
'legacy_hdf/legacy_table_%s.h5' % pandas.__version__), 'a')
df = tm.makeDataFrame()
wp = tm.makePanel()
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo', 'bar'])
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
store.append('mi', df)
df = DataFrame(dict(A='foo', B='bar'), index=lrange(10))
store.append('df', df, data_columns=['B'], min_itemsize={'A': 200})
store.append('wp', wp)
store.close()
def test_store_datetime_fractional_secs(self):
with ensure_clean_store(self.path) as store:
dt = datetime.datetime(2012, 1, 2, 3, 4, 5, 123456)
series = Series([0], [dt])
store['a'] = series
self.assertEqual(store['a'].index[0], dt)
def test_tseries_indices_series(self):
with ensure_clean_store(self.path) as store:
idx = tm.makeDateIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store['a'] = ser
result = store['a']
assert_series_equal(result, ser)
self.assertEqual(type(result.index), type(ser.index))
self.assertEqual(result.index.freq, ser.index.freq)
idx = tm.makePeriodIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store['a'] = ser
result = store['a']
assert_series_equal(result, ser)
self.assertEqual(type(result.index), type(ser.index))
self.assertEqual(result.index.freq, ser.index.freq)
def test_tseries_indices_frame(self):
with ensure_clean_store(self.path) as store:
idx = tm.makeDateIndex(10)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
store['a'] = df
result = store['a']
assert_frame_equal(result, df)
self.assertEqual(type(result.index), type(df.index))
self.assertEqual(result.index.freq, df.index.freq)
idx = tm.makePeriodIndex(10)
df = DataFrame(np.random.randn(len(idx), 3), idx)
store['a'] = df
result = store['a']
assert_frame_equal(result, df)
self.assertEqual(type(result.index), type(df.index))
self.assertEqual(result.index.freq, df.index.freq)
def test_unicode_index(self):
unicode_values = [u('\u03c3'), u('\u03c3\u03c3')]
def f():
s = Series(np.random.randn(len(unicode_values)), unicode_values)
self._check_roundtrip(s, tm.assert_series_equal)
compat_assert_produces_warning(PerformanceWarning, f)
def test_unicode_longer_encoded(self):
# GH 11234
char = '\u0394'
df = pd.DataFrame({'A': [char]})
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table', encoding='utf-8')
result = store.get('df')
tm.assert_frame_equal(result, df)
df = pd.DataFrame({'A': ['a', char], 'B': ['b', 'b']})
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table', encoding='utf-8')
result = store.get('df')
tm.assert_frame_equal(result, df)
def test_store_datetime_mixed(self):
df = DataFrame(
{'a': [1, 2, 3], 'b': [1., 2., 3.], 'c': ['a', 'b', 'c']})
ts = tm.makeTimeSeries()
df['d'] = ts.index[:3]
self._check_roundtrip(df, tm.assert_frame_equal)
# def test_cant_write_multiindex_table(self):
# # for now, #1848
# df = DataFrame(np.random.randn(10, 4),
# index=[np.arange(5).repeat(2),
# np.tile(np.arange(2), 5)])
# self.assertRaises(Exception, store.put, 'foo', df, format='table')
def test_append_with_diff_col_name_types_raises_value_error(self):
df = DataFrame(np.random.randn(10, 1))
df2 = DataFrame({'a': np.random.randn(10)})
df3 = DataFrame({(1, 2): np.random.randn(10)})
df4 = DataFrame({('1', 2): np.random.randn(10)})
df5 = DataFrame({('1', 2, object): np.random.randn(10)})
with ensure_clean_store(self.path) as store:
name = 'df_%s' % tm.rands(10)
store.append(name, df)
for d in (df2, df3, df4, df5):
with tm.assertRaises(ValueError):
store.append(name, d)
def test_query_with_nested_special_character(self):
df = DataFrame({'a': ['a', 'a', 'c', 'b',
'test & test', 'c', 'b', 'e'],
'b': [1, 2, 3, 4, 5, 6, 7, 8]})
expected = df[df.a == 'test & test']
with ensure_clean_store(self.path) as store:
store.append('test', df, format='table', data_columns=True)
result = store.select('test', 'a = "test & test"')
tm.assert_frame_equal(expected, result)
def test_categorical(self):
with ensure_clean_store(self.path) as store:
# basic
_maybe_remove(store, 's')
s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=[
'a', 'b', 'c', 'd'], ordered=False))
store.append('s', s, format='table')
result = store.select('s')
tm.assert_series_equal(s, result)
_maybe_remove(store, 's_ordered')
s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=[
'a', 'b', 'c', 'd'], ordered=True))
store.append('s_ordered', s, format='table')
result = store.select('s_ordered')
tm.assert_series_equal(s, result)
_maybe_remove(store, 'df')
df = DataFrame({"s": s, "vals": [1, 2, 3, 4, 5, 6]})
store.append('df', df, format='table')
result = store.select('df')
tm.assert_frame_equal(result, df)
# dtypes
s = Series([1, 1, 2, 2, 3, 4, 5]).astype('category')
store.append('si', s)
result = store.select('si')
tm.assert_series_equal(result, s)
s = Series([1, 1, np.nan, 2, 3, 4, 5]).astype('category')
store.append('si2', s)
result = store.select('si2')
tm.assert_series_equal(result, s)
# multiple
df2 = df.copy()
df2['s2'] = Series(list('abcdefg')).astype('category')
store.append('df2', df2)
result = store.select('df2')
tm.assert_frame_equal(result, df2)
# make sure the metadata is ok
self.assertTrue('/df2 ' in str(store))
self.assertTrue('/df2/meta/values_block_0/meta' in str(store))
self.assertTrue('/df2/meta/values_block_1/meta' in str(store))
# unordered
s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=[
'a', 'b', 'c', 'd'], ordered=False))
store.append('s2', s, format='table')
result = store.select('s2')
tm.assert_series_equal(result, s)
# query
store.append('df3', df, data_columns=['s'])
expected = df[df.s.isin(['b', 'c'])]
result = store.select('df3', where=['s in ["b","c"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(['b', 'c'])]
result = store.select('df3', where=['s = ["b","c"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(['d'])]
result = store.select('df3', where=['s in ["d"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(['f'])]
result = store.select('df3', where=['s in ["f"]'])
tm.assert_frame_equal(result, expected)
# appending with same categories is ok
store.append('df3', df)
df = concat([df, df])
expected = df[df.s.isin(['b', 'c'])]
result = store.select('df3', where=['s in ["b","c"]'])
tm.assert_frame_equal(result, expected)
# appending must have the same categories
df3 = df.copy()
df3['s'].cat.remove_unused_categories(inplace=True)
self.assertRaises(ValueError, lambda: store.append('df3', df3))
# remove
# make sure meta data is removed (its a recursive removal so should
# be)
result = store.select('df3/meta/s/meta')
self.assertIsNotNone(result)
store.remove('df3')
self.assertRaises(
KeyError, lambda: store.select('df3/meta/s/meta'))
def test_duplicate_column_name(self):
df = DataFrame(columns=["a", "a"], data=[[0, 0]])
with ensure_clean_path(self.path) as path:
self.assertRaises(ValueError, df.to_hdf,
path, 'df', format='fixed')
df.to_hdf(path, 'df', format='table')
other = read_hdf(path, 'df')
tm.assert_frame_equal(df, other)
self.assertTrue(df.equals(other))
self.assertTrue(other.equals(df))
def test_round_trip_equals(self):
# GH 9330
df = DataFrame({"B": [1, 2], "A": ["x", "y"]})
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table')
other = read_hdf(path, 'df')
tm.assert_frame_equal(df, other)
self.assertTrue(df.equals(other))
self.assertTrue(other.equals(df))
def test_preserve_timedeltaindex_type(self):
# GH9635
# Storing TimedeltaIndexed DataFrames in fixed stores did not preserve
# the type of the index.
df = DataFrame(np.random.normal(size=(10, 5)))
df.index = timedelta_range(
start='0s', periods=10, freq='1s', name='example')
with ensure_clean_store(self.path) as store:
store['df'] = df
assert_frame_equal(store['df'], df)
def test_colums_multiindex_modified(self):
# BUG: 7212
# read_hdf store.select modified the passed columns parameters
# when multi-indexed.
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
df.index.name = 'letters'
df = df.set_index(keys='E', append=True)
data_columns = df.index.names + df.columns.tolist()
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df',
mode='a',
append=True,
data_columns=data_columns,
index=False)
cols2load = list('BCD')
cols2load_original = list(cols2load)
df_loaded = read_hdf(path, 'df', columns=cols2load) # noqa
self.assertTrue(cols2load_original == cols2load)
def test_to_hdf_with_object_column_names(self):
# GH9057
# Writing HDF5 table format should only work for string-like
# column types
types_should_fail = [tm.makeIntIndex, tm.makeFloatIndex,
tm.makeDateIndex, tm.makeTimedeltaIndex,
tm.makePeriodIndex]
types_should_run = [tm.makeStringIndex, tm.makeCategoricalIndex]
if compat.PY3:
types_should_run.append(tm.makeUnicodeIndex)
else:
types_should_fail.append(tm.makeUnicodeIndex)
for index in types_should_fail:
df = DataFrame(np.random.randn(10, 2), columns=index(2))
with ensure_clean_path(self.path) as path:
with self.assertRaises(
ValueError, msg=("cannot have non-object label "
"DataIndexableCol")):
df.to_hdf(path, 'df', format='table', data_columns=True)
for index in types_should_run:
df = DataFrame(np.random.randn(10, 2), columns=index(2))
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table', data_columns=True)
result = pd.read_hdf(
path, 'df', where="index = [{0}]".format(df.index[0]))
assert(len(result))
def test_read_hdf_open_store(self):
# GH10330
# No check for non-string path_or-buf, and no test of open store
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
df.index.name = 'letters'
df = df.set_index(keys='E', append=True)
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', mode='w')
direct = read_hdf(path, 'df')
store = HDFStore(path, mode='r')
indirect = read_hdf(store, 'df')
tm.assert_frame_equal(direct, indirect)
self.assertTrue(store.is_open)
store.close()
def test_read_hdf_iterator(self):
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
df.index.name = 'letters'
df = df.set_index(keys='E', append=True)
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', mode='w', format='t')
direct = read_hdf(path, 'df')
iterator = read_hdf(path, 'df', iterator=True)
self.assertTrue(isinstance(iterator, TableIterator))
indirect = next(iterator.__iter__())
tm.assert_frame_equal(direct, indirect)
iterator.store.close()
def test_read_hdf_errors(self):
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
self.assertRaises(IOError, read_hdf, path, 'key')
df.to_hdf(path, 'df')
store = HDFStore(path, mode='r')
store.close()
self.assertRaises(IOError, read_hdf, store, 'df')
with open(path, mode='r') as store:
self.assertRaises(NotImplementedError, read_hdf, store, 'df')
def test_invalid_complib(self):
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
self.assertRaises(ValueError, df.to_hdf, path,
'df', complib='blosc:zlib')
# GH10443
def test_read_nokey(self):
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', mode='a')
reread = read_hdf(path)
assert_frame_equal(df, reread)
df.to_hdf(path, 'df2', mode='a')
self.assertRaises(ValueError, read_hdf, path)
class TestHDFComplexValues(Base):
# GH10447
def test_complex_fixed(self):
df = DataFrame(np.random.rand(4, 5).astype(np.complex64),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
df = DataFrame(np.random.rand(4, 5).astype(np.complex128),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
def test_complex_table(self):
df = DataFrame(np.random.rand(4, 5).astype(np.complex64),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
df = DataFrame(np.random.rand(4, 5).astype(np.complex128),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table', mode='w')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
def test_complex_mixed_fixed(self):
complex64 = np.array([1.0 + 1.0j, 1.0 + 1.0j,
1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64)
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j],
dtype=np.complex128)
df = DataFrame({'A': [1, 2, 3, 4],
'B': ['a', 'b', 'c', 'd'],
'C': complex64,
'D': complex128,
'E': [1.0, 2.0, 3.0, 4.0]},
index=list('abcd'))
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
def test_complex_mixed_table(self):
complex64 = np.array([1.0 + 1.0j, 1.0 + 1.0j,
1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64)
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j],
dtype=np.complex128)
df = DataFrame({'A': [1, 2, 3, 4],
'B': ['a', 'b', 'c', 'd'],
'C': complex64,
'D': complex128,
'E': [1.0, 2.0, 3.0, 4.0]},
index=list('abcd'))
with ensure_clean_store(self.path) as store:
store.append('df', df, data_columns=['A', 'B'])
result = store.select('df', where=Term('A>2'))
assert_frame_equal(df.loc[df.A > 2], result)
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
def test_complex_across_dimensions_fixed(self):
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
s = Series(complex128, index=list('abcd'))
df = DataFrame({'A': s, 'B': s})
p = Panel({'One': df, 'Two': df})
objs = [s, df, p]
comps = [tm.assert_series_equal, tm.assert_frame_equal,
tm.assert_panel_equal]
for obj, comp in zip(objs, comps):
with ensure_clean_path(self.path) as path:
obj.to_hdf(path, 'obj', format='fixed')
reread = read_hdf(path, 'obj')
comp(obj, reread)
def test_complex_across_dimensions(self):
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
s = Series(complex128, index=list('abcd'))
df = DataFrame({'A': s, 'B': s})
p = Panel({'One': df, 'Two': df})
p4d = pd.Panel4D({'i': p, 'ii': p})
objs = [df, p, p4d]
comps = [tm.assert_frame_equal, tm.assert_panel_equal,
tm.assert_panel4d_equal]
for obj, comp in zip(objs, comps):
with ensure_clean_path(self.path) as path:
obj.to_hdf(path, 'obj', format='table')
reread = read_hdf(path, 'obj')
comp(obj, reread)
def test_complex_indexing_error(self):
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j],
dtype=np.complex128)
df = DataFrame({'A': [1, 2, 3, 4],
'B': ['a', 'b', 'c', 'd'],
'C': complex128},
index=list('abcd'))
with ensure_clean_store(self.path) as store:
self.assertRaises(TypeError, store.append,
'df', df, data_columns=['C'])
def test_complex_series_error(self):
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
s = Series(complex128, index=list('abcd'))
with ensure_clean_path(self.path) as path:
self.assertRaises(TypeError, s.to_hdf, path, 'obj', format='t')
with ensure_clean_path(self.path) as path:
s.to_hdf(path, 'obj', format='t', index=False)
reread = read_hdf(path, 'obj')
tm.assert_series_equal(s, reread)
def test_complex_append(self):
df = DataFrame({'a': np.random.randn(100).astype(np.complex128),
'b': np.random.randn(100)})
with ensure_clean_store(self.path) as store:
store.append('df', df, data_columns=['b'])
store.append('df', df)
result = store.select('df')
assert_frame_equal(pd.concat([df, df], 0), result)
class TestTimezones(Base, tm.TestCase):
def _compare_with_tz(self, a, b):
tm.assert_frame_equal(a, b)
# compare the zones on each element
for c in a.columns:
for i in a.index:
a_e = a.loc[i, c]
b_e = b.loc[i, c]
if not (a_e == b_e and a_e.tz == b_e.tz):
raise AssertionError(
"invalid tz comparison [%s] [%s]" % (a_e, b_e))
def test_append_with_timezones_dateutil(self):
from datetime import timedelta
tm._skip_if_no_dateutil()
# use maybe_get_tz instead of dateutil.tz.gettz to handle the windows
# filename issues.
from pandas.tslib import maybe_get_tz
gettz = lambda x: maybe_get_tz('dateutil/' + x)
# as columns
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A=[Timestamp('20130102 2:00:00', tz=gettz(
'US/Eastern')) + timedelta(hours=1) * i for i in range(5)]))
store.append('df_tz', df, data_columns=['A'])
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
# select with tz aware
expected = df[df.A >= df.A[3]]
result = store.select('df_tz', where=Term('A>=df.A[3]'))
self._compare_with_tz(result, expected)
# ensure we include dates in DST and STD time here.
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A=Timestamp('20130102',
tz=gettz('US/Eastern')),
B=Timestamp('20130603',
tz=gettz('US/Eastern'))),
index=range(5))
store.append('df_tz', df)
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
df = DataFrame(dict(A=Timestamp('20130102',
tz=gettz('US/Eastern')),
B=Timestamp('20130102', tz=gettz('EET'))),
index=range(5))
self.assertRaises(ValueError, store.append, 'df_tz', df)
# this is ok
_maybe_remove(store, 'df_tz')
store.append('df_tz', df, data_columns=['A', 'B'])
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
# can't append with diff timezone
df = DataFrame(dict(A=Timestamp('20130102',
tz=gettz('US/Eastern')),
B=Timestamp('20130102', tz=gettz('CET'))),
index=range(5))
self.assertRaises(ValueError, store.append, 'df_tz', df)
# as index
with ensure_clean_store(self.path) as store:
# GH 4098 example
df = DataFrame(dict(A=Series(lrange(3), index=date_range(
'2000-1-1', periods=3, freq='H', tz=gettz('US/Eastern')))))
_maybe_remove(store, 'df')
store.put('df', df)
result = store.select('df')
assert_frame_equal(result, df)
_maybe_remove(store, 'df')
store.append('df', df)
result = store.select('df')
assert_frame_equal(result, df)
def test_append_with_timezones_pytz(self):
from datetime import timedelta
# as columns
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A=[Timestamp('20130102 2:00:00',
tz='US/Eastern') +
timedelta(hours=1) * i
for i in range(5)]))
store.append('df_tz', df, data_columns=['A'])
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
# select with tz aware
self._compare_with_tz(store.select(
'df_tz', where=Term('A>=df.A[3]')), df[df.A >= df.A[3]])
_maybe_remove(store, 'df_tz')
# ensure we include dates in DST and STD time here.
df = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),
B=Timestamp('20130603', tz='US/Eastern')),
index=range(5))
store.append('df_tz', df)
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
df = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),
B=Timestamp('20130102', tz='EET')),
index=range(5))
self.assertRaises(ValueError, store.append, 'df_tz', df)
# this is ok
_maybe_remove(store, 'df_tz')
store.append('df_tz', df, data_columns=['A', 'B'])
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
# can't append with diff timezone
df = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),
B=Timestamp('20130102', tz='CET')),
index=range(5))
self.assertRaises(ValueError, store.append, 'df_tz', df)
# as index
with ensure_clean_store(self.path) as store:
# GH 4098 example
df = DataFrame(dict(A=Series(lrange(3), index=date_range(
'2000-1-1', periods=3, freq='H', tz='US/Eastern'))))
_maybe_remove(store, 'df')
store.put('df', df)
result = store.select('df')
assert_frame_equal(result, df)
_maybe_remove(store, 'df')
store.append('df', df)
result = store.select('df')
assert_frame_equal(result, df)
def test_tseries_select_index_column(self):
# GH7777
# selecting a UTC datetimeindex column did
# not preserve UTC tzinfo set before storing
# check that no tz still works
rng = date_range('1/1/2000', '1/30/2000')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store.append('frame', frame)
result = store.select_column('frame', 'index')
self.assertEqual(rng.tz, DatetimeIndex(result.values).tz)
# check utc
rng = date_range('1/1/2000', '1/30/2000', tz='UTC')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store.append('frame', frame)
result = store.select_column('frame', 'index')
self.assertEqual(rng.tz, result.dt.tz)
# double check non-utc
rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store.append('frame', frame)
result = store.select_column('frame', 'index')
self.assertEqual(rng.tz, result.dt.tz)
def test_timezones_fixed(self):
with ensure_clean_store(self.path) as store:
# index
rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
store['df'] = df
result = store['df']
assert_frame_equal(result, df)
# as data
# GH11411
_maybe_remove(store, 'df')
df = DataFrame({'A': rng,
'B': rng.tz_convert('UTC').tz_localize(None),
'C': rng.tz_convert('CET'),
'D': range(len(rng))}, index=rng)
store['df'] = df
result = store['df']
assert_frame_equal(result, df)
def test_fixed_offset_tz(self):
rng = date_range('1/1/2000 00:00:00-07:00', '1/30/2000 00:00:00-07:00')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store['frame'] = frame
recons = store['frame']
self.assertTrue(recons.index.equals(rng))
self.assertEqual(rng.tz, recons.index.tz)
def test_store_timezone(self):
# GH2852
# issue storing datetime.date with a timezone as it resets when read
# back in a new timezone
import platform
if platform.system() == "Windows":
raise nose.SkipTest("timezone setting not supported on windows")
import datetime
import time
import os
# original method
with ensure_clean_store(self.path) as store:
today = datetime.date(2013, 9, 10)
df = DataFrame([1, 2, 3], index=[today, today, today])
store['obj1'] = df
result = store['obj1']
assert_frame_equal(result, df)
# with tz setting
orig_tz = os.environ.get('TZ')
def setTZ(tz):
if tz is None:
try:
del os.environ['TZ']
except:
pass
else:
os.environ['TZ'] = tz
time.tzset()
try:
with ensure_clean_store(self.path) as store:
setTZ('EST5EDT')
today = datetime.date(2013, 9, 10)
df = DataFrame([1, 2, 3], index=[today, today, today])
store['obj1'] = df
setTZ('CST6CDT')
result = store['obj1']
assert_frame_equal(result, df)
finally:
setTZ(orig_tz)
def test_legacy_datetimetz_object(self):
# legacy from < 0.17.0
# 8260
expected = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),
B=Timestamp('20130603', tz='CET')),
index=range(5))
with ensure_clean_store(
tm.get_data_path('legacy_hdf/datetimetz_object.h5'),
mode='r') as store:
result = store['df']
assert_frame_equal(result, expected)
def test_dst_transitions(self):
# make sure we are not failing on transitions
with ensure_clean_store(self.path) as store:
times = pd.date_range("2013-10-26 23:00", "2013-10-27 01:00",
tz="Europe/London",
freq="H",
ambiguous='infer')
for i in [times, times + pd.Timedelta('10min')]:
_maybe_remove(store, 'df')
df = DataFrame({'A': range(len(i)), 'B': i}, index=i)
store.append('df', df)
result = store.select('df')
assert_frame_equal(result, df)
def _test_sort(obj):
if isinstance(obj, DataFrame):
return obj.reindex(sorted(obj.index))
elif isinstance(obj, Panel):
return obj.reindex(major=sorted(obj.major_axis))
else:
raise ValueError('type not supported here')
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-2.0 |
boada/planckClusters | analysis_op/plots/plot_cluster_forecast.py | 1 | 4471 | #!/usr/bin/env python3
from astropy.table import Table
import numpy
import matplotlib.pyplot as plt
from pyTinker import tinker
def generate_Tinker(mass, z1, z2, dz):
h = 0.7
# Initialize and generate the dn/dm
t = tinker.counts(
Om=0.3,
OL=0.7,
Ob=0.0456,
H0=100.0,
h0=0.70,
ITRANS=5,
sigma8=0.82,
spectral_index=0.963)
t.get_dndM(z1=z1, z2=z2, dz=dz, delta=500)
nm = len(mass)
nz = len(t.zx)
dndz = numpy.zeros((nm, nz))
Ns = numpy.zeros((nm, nz))
for i, m in enumerate(mass):
dn, N = t.get_dndz(m / h)
dndz[i, :] = dn
Ns[i, :] = N
return t.zx, dndz, Ns
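# generate_Tinker returns the redshift grid (t.zx) together with dN/dz and the
# cumulative counts N(<z), each shaped (number of mass tracks, number of redshift
# steps): one row per input mass track, computed here from a delta=500 Tinker
# mass function via pyTinker.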
if __name__ == "__main__":
h = 0.7
z1 = 0
z2 = 1.2
dz = 0.025
# build the mass array
zarr = numpy.arange(z1, z2 + dz, dz)
mass = numpy.ones_like(zarr) * 1e14
ps2 = Table.read('../catalogs/PSZ2v1.fits')
df2 = ps2.to_pandas()
data = df2[['REDSHIFT', 'MSZ']]
data['REDSHIFT'].replace(-1, numpy.nan, inplace=True)
# redshift bins
zbins = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 3]
nMasses = 100
big_mass = []
for j in range(nMasses):
mass = numpy.ones_like(zarr) * 1e14
for i in range(len(zbins) - 1):
mask = (zbins[i] <= zarr) & (zarr < zbins[i + 1])
mass[mask] *= float(data.loc[(zbins[i] <= data['REDSHIFT']) &
(data['REDSHIFT'] < zbins[i + 1]),
'MSZ'].sample()) * h
big_mass.append(mass)
mass = numpy.vstack(big_mass)
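# After vstack, `mass` has shape (nMasses, len(zarr)): each row is one Monte Carlo
# draw in which every redshift bin gets an M500 resampled from the PSZ2 clusters
# with confirmed redshifts in that bin (MSZ is tabulated in units of 1e14 Msun,
# hence the 1e14 and h factors above).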
z, dN, Ns = generate_Tinker(mass, z1, z2, dz)
# Plot the cumulative distributions
#f, ax = plt.subplots(1)
f = plt.figure(figsize=(7 * (numpy.sqrt(5.) - 1.0) / 2.0, 7))
ax = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
axs = plt.subplot2grid((3, 1), (2, 0))
lws = [0.5, 1.5, 2.5]
# plot the masses at the bottom
for i in range(nMasses):
axs.plot(z, big_mass[i] / 1e14, c='0.7', lw=lws[0], alpha=0.7)
for i in range(nMasses):
# Normalize to unity
Ns[i] = Ns[i] / Ns[i][-1]
#axs.plot(z, Ns[i], c='0.7', lw=lws[0], alpha=0.7, zorder=1)
# add median and 68% lines
ax.plot(z, numpy.median(Ns, axis=0), lw=lws[1], zorder=2, color='#e24a33')
ax.plot(z, numpy.percentile(Ns, 50 + 34.1, axis=0), ls='--', lw=lws[1],
zorder=2, color='#e24a33')
ax.plot(z, numpy.percentile(Ns, 50 - 34.1, axis=0), ls='--', lw=lws[1],
zorder=2, color='#e24a33')
# bottom plot
# make all the curves in one shot. The 68% are harder to make, so that's
# why we are doing it all in one go, versus one at a time.
# get unique masses in each set of bins
umasses = [numpy.unique(numpy.unique(mass, axis=0)[:, i])
for i in range(zarr.size)]
bounds = [numpy.percentile(umasses[i] / 1e14, [50 - 34.1, 50, 50 + 34.1])
for i in range(zarr.size)]
# convert to array
bounds = numpy.array(bounds)
axs.plot(z, bounds[:, 0], ls='--', lw=lws[1], color='#e24a33')
axs.plot(z, bounds[:, 1], lw=lws[1], color='#e24a33')
axs.plot(z, bounds[:, 2], ls='--', lw=lws[1], color='#e24a33')
bins = numpy.linspace(0, 1.5, 50)
n2, bins2, patches = ax.hist(df2.loc[df2['REDSHIFT'].notnull(),
'REDSHIFT'], bins, histtype='step',
cumulative=True, density=True, zorder=3,
lw=lws[-1])
# finish the plot
ax.set_xlim(0, z2)
ax.set_xticklabels([])
axs.set_xlim(0, z2)
ax.set_ylim(0, 1.01)
axs.set_xlabel('Redshift (z)')
ax.set_ylabel('N (<z)')
axs.set_ylabel('M$_{500}$ (10$^{14} M_\odot$)')
# The 50% and 90% levels
ax.axhline(0.9, lw=0.8)
ax.axhline(0.5, lw=0.8)
ax.text(0.10, 0.92, "90%")
ax.text(0.10, 0.52, "50%")
ax.grid(alpha=0.25)
# Make a custom legend
indv_runs = plt.Line2D((0, 0), (0, 1), color='0.7', alpha=0.7, lw=lws[0])
med = plt.Line2D((0, 0), (0, 1), color='#e24a33', lw=lws[1])
quartile = plt.Line2D((0, 0), (0, 1), color='#e24a33', ls='--', lw=lws[1])
psz_hist = plt.Line2D((0, 0), (0, 1), color='#348abd', lw=lws[-1])
ax.legend([med, quartile, psz_hist], ['Median', '68%',
'PSZ Confirmed (<z)'])
axs.legend([indv_runs], ['M(z)'], loc='lower right')
plt.show()
| mit |
ocastany/Berreman4x4 | examples/validation-Bragg.py | 1 | 4955 | #!/usr/bin/python
# encoding: utf-8
# Berreman4x4 example
# Author: O. Castany, C. Molinaro
# Validation for a TiO2/SiO2 Bragg mirror
import numpy, Berreman4x4
import scipy.linalg
import matplotlib.pyplot as pyplot
from Berreman4x4 import c, pi
from numpy import newaxis, exp, sin
print("\n*** TiO2/SiO2 Bragg mirror ***\n")
############################################################################
# Structure definition
# Front and back materials
n_a = 1.0
n_g = 1.5
air = Berreman4x4.IsotropicNonDispersiveMaterial(n_a)
glass = Berreman4x4.IsotropicNonDispersiveMaterial(n_g)
front = Berreman4x4.IsotropicHalfSpace(air)
back = Berreman4x4.IsotropicHalfSpace(glass)
# Materials for a SiO2/TiO2 Bragg mirror
lbda0 = 1.550e-6
k0 = 2*pi/lbda0
nr_SiO2 = 1.47
nr_TiO2 = 2.23
alpha_SiO2 = 0e2 # (m⁻¹)
alpha_TiO2 = 42e2 # (m⁻¹)
ni_SiO2 = alpha_SiO2 * lbda0 / (4*pi)
ni_TiO2 = alpha_TiO2 * lbda0 / (4*pi)
n_SiO2 = nr_SiO2 + 1j * ni_SiO2
n_TiO2 = nr_TiO2 + 1j * ni_TiO2
SiO2 = Berreman4x4.IsotropicNonDispersiveMaterial(n_SiO2)
TiO2 = Berreman4x4.IsotropicNonDispersiveMaterial(n_TiO2)
# Layers
L_SiO2 = Berreman4x4.HomogeneousIsotropicLayer(SiO2, ("QWP", lbda0))
L_TiO2 = Berreman4x4.HomogeneousIsotropicLayer(TiO2, ("QWP", lbda0))
print("Thickness of the SiO2 QWP: {:.1f} nm".format(L_SiO2.h*1e9))
print("Thickness of the TiO2 QWP: {:.1f} nm".format(L_TiO2.h*1e9))
# Repeated layers: n periods
L = Berreman4x4.RepeatedLayers([L_TiO2, L_SiO2], 4, 0, 0)
# Number of interfaces
N = 2 * L.n + 1
# Structure
s = Berreman4x4.Structure(front, [L], back)
############################################################################
# Analytical calculation
n = numpy.ones(N+1, dtype=complex)
n[0] = n_a
n[1::2] = n_TiO2
n[2::2] = n_SiO2
n[-1] = n_g
n.shape = (-1,1)
d = numpy.ones(N+1)
d[1::2] = L_TiO2.h # d[0] is not used
d[2::2] = L_SiO2.h
(lbda1, lbda2) = (1.1e-6, 2.5e-6)
lbda_list = numpy.linspace(lbda1, lbda2, 200)
def ReflectionCoeff(incidence_angle=0., polarisation='s'):
"""Returns the reflection coefficient in amplitude"""
Kx = n[0]*sin(incidence_angle)
sinPhi = Kx/n
kz = 2*pi/lbda_list * numpy.sqrt(n**2-(Kx)**2)
# Reflection coefficient r_{k,k+1} for a single interface
# polarisation s:
# r_ab(p) = r_{p,p+1} = (kz(p)-kz(p+1))/(kz(p)+kz(p+1))
# polarisation p:
# r_ab(p) = r_{p,p+1} = (kz(p)*n[p+1]**2-kz(p+1)*n[p]**2) \
# /(kz(p)*n[p]**2+kz(p+1)*n[p+1]**2)
if (polarisation == 's'):
r_ab = (-numpy.diff(kz,axis=0)) / (kz[:-1] + kz[1:])
elif (polarisation == 'p'):
r_ab =(kz[:-1]*(n[1:])**2 - kz[1:]*(n[:-1])**2) \
/ (kz[:-1]*(n[1:])**2 + kz[1:]*(n[:-1])**2)
# Local function definition for recursive calculation
def U(k):
"""Returns reflection coefficient U(k) = r_{k, {k+1,...,N}}
Used recursively.
"""
p = k+1
if (p == N):
res = r_ab[N-1]
else :
res = (r_ab[p-1] + U(p)*exp(2j*kz[p]*d[p])) \
/ (1 + r_ab[p-1] * U(p)*exp(2j*kz[p]*d[p]))
return res
return U(0)
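# Sanity check on the recursion above (limiting case only, not used below): for a
# single interface (N = 1) at normal incidence, U(0) reduces to r_ab[0] =
# (kz[0] - kz[1])/(kz[0] + kz[1]), i.e. the Fresnel result (n_a - n_g)/(n_a + n_g),
# since kz is proportional to n when Kx = 0.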
# Power reflection coefficient for different incidence angles and polarisations
R_th_ss_0 = (abs(ReflectionCoeff(0, 's')))**2 # Phi_i = 0
R_th_ss = (abs(ReflectionCoeff(pi/4, 's')))**2 # Phi_i = pi/4
R_th_pp = (abs(ReflectionCoeff(pi/4, 'p')))**2
############################################################################
# Calculation with Berreman4x4
# Incidence angle Phi_i = 0, 's' polarization
Kx = front.get_Kx_from_Phi(0)
data = numpy.array([s.getJones(Kx, 2*pi/lbda) for lbda in lbda_list])
r_ss = Berreman4x4.extractCoefficient(data, 'r_ss')
R_ss_0 = abs(r_ss)**2
# Incidence angle Phi_i = pi/4, 's' and 'p' polarizations
Kx = front.get_Kx_from_Phi(pi/4)
data = numpy.array([s.getJones(Kx, 2*pi/lbda) for lbda in lbda_list])
r_ss = Berreman4x4.extractCoefficient(data, 'r_ss')
r_pp = Berreman4x4.extractCoefficient(data, 'r_pp')
R_ss = abs(r_ss)**2
R_pp = abs(r_pp)**2
############################################################################
# Plotting
fig = pyplot.figure(figsize=(12., 6.))
pyplot.rcParams['axes.prop_cycle'] = pyplot.cycler('color', 'bgr')
ax = fig.add_axes([0.1, 0.1, 0.7, 0.8])
d = numpy.vstack((R_ss_0,R_ss,R_pp)).T
lines1 = ax.plot(lbda_list,d)
legend1 = ("R_ss (0$^\circ$)","R_ss (45$^\circ$)","R_pp (45$^\circ$)")
d = numpy.vstack((R_th_ss_0,R_th_ss,R_th_pp)).T
lines2 = ax.plot(lbda_list,d,'x')
legend2 = ("R_th_ss (0$^\circ$)","R_th_ss (45$^\circ$)",
"R_th_pp (45$^\circ$)")
ax.legend(lines1 + lines2, legend1 + legend2,
loc='upper left', bbox_to_anchor=(1.05, 1), borderaxespad=0.)
ax.set_title(r"Bragg mirror: Air/{TiO$_2$/SiO$_2$}x" + str(L.n) + "/Glass")
ax.set_xlabel(r"Wavelength $\lambda$ (m)")
ax.set_ylabel(r"$R$")
fmt = ax.xaxis.get_major_formatter()
fmt.set_powerlimits((-3,3))
s.drawStructure()
pyplot.show()
| gpl-3.0 |
hbp-brain-charting/public_protocols | spatial_navigation/paradigm_descriptors/paradigm_descriptors_extraction.py | 1 | 6004 | """
Script to parse the original logfiles of the protocol and generate BIDS-compliant files
Author: Juan Jesus Torre Tresols
Mail: [email protected]
"""
import ast
import argparse
import glob
import os
import numpy as np
import pandas as pd
parser = argparse.ArgumentParser(description='Parameters for the logfiles')
parser.add_argument('-n', '--number', metavar='SubjectNum', type=int,
help="Subject number. It will be formatted as "
"a 2-digit number.")
parser.add_argument('-t', '--type', metavar='SubjectType', type=str,
default='sub', choices=['sub', 'MRI-pilot'],
help="Session type. Choices: "
"%(choices)s. Default: %(default)s")
args = parser.parse_args()
sub_num = "%02d" % args.number
sub_type = args.type
# Functions
def filter_events(logfile):
"""Keep only rows corresponding to events, and remove unused columns"""
logfile.drop(labels=logfile[logfile['Data type'] != 'EVENT'].index, inplace=True)
logfile.dropna(axis=1, inplace=True)
logfile.reset_index(inplace=True, drop=True)
def correct_onsets(logfile):
"""
Set the MR pulse to be time 0, and correct the rest of the timestamps, as well
as expressing them in seconds
"""
ttl = logfile.iloc[0, 1]
logfile["Time"] -= ttl
logfile["Time"] = logfile["Time"].round(2)
def extract_events(logfile):
"""
Extract the onset and event type of all the events of interest and make a new DataFrame
with them
Parameters
----------
logfile: pd.DataFrame
Input logfile. Events will be extracted from it
Returns
-------
events_df: pd.DataFrame
New DataFrame with organized information from the input
"""
events = []
cols = ['onset', 'duration', 'trial_type']
encoding = 0 # No encoding at the beginning
# intersect = 0 # Flag for intersections to avoid weird behavior
for index, row in logfile.iterrows():
trial_info = row.iloc[2]
if trial_info == 'Moving subject through city':
encoding = 1 # We start encoding
trial = [row.Time, 0.0, 'encoding_start']
elif trial_info == 'Movement along path ended':
encoding = 0 # Finished encoding
trial = [row.Time, 0.0, 'encoding_end']
elif encoding and "closing in on intersection" in trial_info:
# intersect = 1 # Now looking for the end of this intersection
intersection_n = ast.literal_eval(trial_info)[1]
trial = [row.Time, 0.0, 'intersection_{0}'.format(intersection_n)]
elif encoding and "away from intersection" in trial_info:
# intersect = 0
trial = [row.Time, 0.0, 'navigation']
elif 'trial' in trial_info:
trial_type = trial_info.split(' ')[1]
trial = [row.Time, 0.0, trial_type]
elif trial_info == 'Crosshair displayed':
trial = [row.Time, 0.0, 'pointing_{0}'.format(trial_type)]
elif 'ITI' in trial_info:
trial = [row.Time, 0.0, 'fixation']
# This last row is only to get durations later
elif index == logfile.tail(1).index:
trial = [row.Time, 0.0, 'last']
else:
continue
events.append(trial)
events_df = pd.DataFrame(events, columns=cols)
# Get durations
_get_durations(events_df)
# Drop last row
events_df.drop(events_df.tail(1).index, inplace=True)
return events_df
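# The onset/duration/trial_type columns produced here follow the BIDS *_events.tsv
# layout, which is what the main loop below writes out (tab-separated, one file per run).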
def _get_durations(logfile):
"""Calculate each event's duration using the next row's onset."""
logfile["duration"] = logfile.shift(-1)["onset"] - logfile["onset"]
logfile["duration"] = logfile["duration"].round(2)
def clean_run_end(arr):
"""Remove extra indicators for run end when two of them are very close"""
diff = np.diff(arr)
bad_idx = []
for idx, i in enumerate(diff):
if i < 100:
bad_idx.append(idx + 1)
clean_list = [i for j, i in enumerate(arr) if j not in bad_idx]
return clean_list
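# Example of clean_run_end (hypothetical index values): for arr = [100, 500, 520, 900]
# the successive differences are [400, 20, 380]; the gap of 20 (< 100) flags the
# second of the two close markers, so the cleaned list is [100, 500, 900].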
# Number of exp and control trials per run
n_trials = 8
n_control = 4
events_per_trial = 3 # All trials have beginning, pointing and fixation phases
# Paths
path = os.getcwd()
input_path = os.path.join(path, '../protocol/logs')
output_path = os.path.join(path, 'paradigm_descriptors_logfiles')
if not os.path.exists(output_path):
os.mkdir(output_path)
# Main
glob_str = os.path.join(input_path, '*_{}_*main.csv'.format(sub_num))
logfiles_path = glob.glob(glob_str)
for logfile in logfiles_path:
first_run = int(logfile.split('_')[-2]) # Get the first run from the logfile name
log = pd.read_csv(logfile, skiprows=4)
filter_events(log)
run_onsets = log[log['sPos (x)'] == "['Onset']"].index
run_end = log[(log['sPos (x)'] == 'Simulation continued')
| (log['sPos (x)'] == 'Test ended')].index
# In case there are extra button presses for run end
run_end = clean_run_end(run_end)
for idx, (onset, end) in enumerate(zip(run_onsets, run_end)):
# Add the first run to the index get the real run number
run_num = idx + first_run
# Get the expected length of the run
length = (n_trials + n_control) * events_per_trial
# if run_num != 0:
# length += 1 # All runs except the first one also have the encoding phase
# Get the data corresponding to the run and correct the timings around the TTL
run = log.iloc[onset:end + 1, :]
run_copy = run.copy(deep=True)
correct_onsets(run_copy)
# Select the events of interest and put them in a new dataframe
new_df = extract_events(run_copy)
# Save the logfile
filename = "{}-{}_task-SpatialNavigation_run{}.tsv".format(sub_type, sub_num, run_num + 1)
file_path = os.path.join(output_path, filename)
# if len(new_df) == length:
new_df.to_csv(file_path, sep='\t', index=False)
| bsd-3-clause |
mmacgilvray18/Phospho_Network | python/Define_Shared_Interator_input_outputs.py | 1 | 4511 | '''
The function of this script is as follows: for each SI and its interactions with a submodule's constituents, the script determines whether the
SI is a likely submodule regulator (that is, the Shared Interactor has at least 1 directional interaction, or ppi interaction, with a subModule protein aimed from the SI to the submodule), or whether the subModule proteins act upon the SI (that is, all interactions between the SI and subModule proteins have the 'Reversed' designation, indicating that the subModule proteins act upon the SI).
-If all of the interactions are reversed, then the script defines the relationship between the SI and the subModule as "Output", indicating that the SI is likely downstream of the submodule and is not likely regulating it.
-If there is at least one interaction that is NOT reversed (i.e., kinase-substrate) or is a non-directed ppi, the relationship between the SI and the subModule is defined as "Input", suggesting that the SI could regulate the phosphorylation state of the subModule proteins.
This script takes an input file that contains the following:
- All enriched Shared Interactors (SIs) (according to HyperG) and their connections to subModules.
- All known protein interactions for each SI (ppi, kinase-substrate, etc)
- Many of these interactions are directed (kinase-substrate, metabolic pathway, etc.). PPIs are not directed interactions.
'''
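# Illustrative example of the classification rule (the rows below are made up;
# only the column names follow the real input file):
#
#     SI_Module      Shared_Interactor  subModule_Name  Interaction_Directionality
#     Kin1_SubModA   Kin1               SubModA         Kinase-Substrate
#     Kin1_SubModA   Kin1               SubModA         Reversed
#     Pho2_SubModB   Pho2               SubModB         Reversed
#     Pho2_SubModB   Pho2               SubModB         Reversed
#
# For Kin1_SubModA only one of the two interactions is 'Reversed', so
# Counts - Length != 0 and the SI is labelled 'Input' (a possible regulator of the
# submodule). For Pho2_SubModB every interaction is 'Reversed', so
# Counts - Length == 0 and the SI is labelled 'Output' (likely downstream).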
import pandas as pd
Input_df=pd.read_csv('/Users/mmacgilvray18/Desktop/DTT_T120_Prep_for_Orientation_Script_Sept2017.csv')
# Split the input DF into independent DFs based on the term in the SI_Module column (this columns contains the SI and it's connection to each subModule).
def Split_based_on_SI_Module_Column():
DF_lst =[]
for SI_Module in Input_df['SI_Module'].unique():
DF=Input_df.loc[Input_df['SI_Module']==SI_Module]
DF_lst.append(DF)
return DF_lst
DF_lst=Split_based_on_SI_Module_Column()
# This function counts, for each DF, how many of the interactions are reversed. It also counts the length of the dataframe, and then
subtracts the length of the dataframe from the counts. If the resultant value is 0, then all of the interactions were reversed.
def Count_Instances_of_Reverse_Interaction():
DF_Counts_lst=[]
for df in DF_lst:
df=df.copy()
df['Counts']=df.Interaction_Directionality.str.contains('Reversed').sum() # Count the number of interactions that are "Reversed"
x=len(df)
df['Length']=x
df['Counts_Length']=df['Counts']-df['Length']
DF_Counts_lst.append(df)
return DF_Counts_lst
DF_Counts_lst=Count_Instances_of_Reverse_Interaction()
print (DF_Counts_lst)
# This function assigns 'Input' and 'Output' classifications based on the 'Counts_Length' column in the dataframe.
def Only_Reverse_Interactions_Move_to_Outgoing_Columns():
df_Modified_Outgoing_lst=[]
for df in DF_Counts_lst:
#print (df.dtypes)
for value in df['Counts_Length'].unique():
#print (value)
if value == 0:
df['Shared_Interactor_subModule_Relationship']= 'Output'
df_Modified_Outgoing_lst.append(df)
else:
df['Shared_Interactor_subModule_Relationship']= 'Input'
df_Modified_Outgoing_lst.append(df)
return df_Modified_Outgoing_lst
df_Modified_Outgoing_lst=Only_Reverse_Interactions_Move_to_Outgoing_Columns()
# This function concatenates the dataframes back together, leaving a single DF.
def ConcatenateDFs():
EmptyDF = pd.DataFrame() # create an empty dataframe
for df in df_Modified_Outgoing_lst: # select a dataframe in the list
df=df.copy() # make a copy of that dataframe
EmptyDF=EmptyDF.append(df) # append to the empty DF the dataframe selected and overwrite the empty dataframe
return EmptyDF
Final=ConcatenateDFs()
print (Final)
#Function writes out a Dataframe to a CSV file.
def DF_to_CSV(dataframe, NewFileName):
path ='/Users/mmacgilvray18/Desktop/'
dataframe.to_csv (path+NewFileName,sep='\t')
Final_Keep_Columns_Needed_For_SIF=Final[['SI_Module', 'Shared_Interactor', 'subModule_Name', 'Shared_Interactor_subModule_Relationship']]
Final_Keep_Columns_Needed_For_SIF=Final_Keep_Columns_Needed_For_SIF.drop_duplicates('SI_Module')
DF_to_CSV(Final_Keep_Columns_Needed_For_SIF, 'SIs_subModule_Relationships_Defined_DTT_T120_Network_Input_for_making_SIF_Sept2017.csv')
| gpl-3.0 |
arg-hya/taxiCab | Plots/TaxiPlot/convertThenPlot.py | 1 | 1335 | import pycrs
import mpl_toolkits.basemap.pyproj as pyproj # Import the pyproj module
import shapefile as shp
import matplotlib.pyplot as plt
shpFilePath = r"D:\TaxiCab\mycode\Plots\ShapefileAndTrajectory\taxi_zones\taxi_zones"
sf = shp.Reader(shpFilePath)
records = sf.records()
plt.figure()
for shape in sf.shapeRecords():
x = [i[0] for i in shape.shape.points[:]]
y = [i[1] for i in shape.shape.points[:]]
plt.plot(x,y)
projobj = pycrs.loader.from_file(r'D:\TaxiCab\mycode\Plots\ShapefileAndTrajectory\taxi_zones\taxi_zones.prj')
proj4string = projobj.to_proj4()
print(proj4string)
isn2004=pyproj.Proj(proj4string, preserve_units=True)
wgs84=pyproj.Proj("+init=EPSG:4326")
i = 0
lat = []
lon = []
#1 foot = 0.3048 meters
conv = 0.3048
with open("trip_data_1_next_trip_start_location.txt") as f:
next(f)
for line in f:
i += 1
# print line
strings = line.split(",")
co1 = float(strings[0])
co2 = float(strings[1])
x2,y2 = pyproj.transform(wgs84,isn2004 ,co1,co2)
lat.append(x2)
lon.append(y2)
# if i == 14450:
# break
if i == 1169120:
break
x1 = lat
y1 = lon
plt.plot(x1, y1, 'o', color='blue', markersize=7, markeredgewidth=0.0)
plt.show()
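# Alternative approach (sketch, not used above): with the standalone pyproj >= 2
# package installed, the same kind of conversion can be done without basemap via
# pyproj.Transformer. `proj4string` is the projection string loaded above;
# always_xy=True fixes the argument order to (longitude, latitude).
def to_taxi_zone_coords(lon_deg, lat_deg):
    from pyproj import Transformer
    transformer = Transformer.from_crs("EPSG:4326", proj4string, always_xy=True)
    return transformer.transform(lon_deg, lat_deg)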
| gpl-3.0 |
dopplershift/MetPy | src/metpy/plots/ctables.py | 1 | 8925 | # Copyright (c) 2014,2015,2017,2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Work with custom color tables.
Contains tools for reading color tables from files, and creating instances based on a
specific set of constraints (e.g. step size) for mapping.
.. plot::
import numpy as np
import matplotlib.pyplot as plt
import metpy.plots.ctables as ctables
def plot_color_gradients(cmap_category, cmap_list, nrows):
fig, axes = plt.subplots(figsize=(7, 6), nrows=nrows)
fig.subplots_adjust(top=.93, bottom=0.01, left=0.32, right=0.99)
axes[0].set_title(cmap_category + ' colormaps', fontsize=14)
for ax, name in zip(axes, cmap_list):
ax.imshow(gradient, aspect='auto', cmap=ctables.registry.get_colortable(name))
pos = list(ax.get_position().bounds)
x_text = pos[0] - 0.01
y_text = pos[1] + pos[3]/2.
fig.text(x_text, y_text, name, va='center', ha='right', fontsize=10)
# Turn off *all* ticks & spines, not just the ones with colormaps.
for ax in axes:
ax.set_axis_off()
cmaps = list(ctables.registry)
cmaps = [name for name in cmaps if name[-2:]!='_r']
nrows = len(cmaps)
gradient = np.linspace(0, 1, 256)
gradient = np.vstack((gradient, gradient))
plot_color_gradients('MetPy', cmaps, nrows)
plt.show()
"""
import ast
import logging
from pathlib import Path
import matplotlib.colors as mcolors
from ..package_tools import Exporter
exporter = Exporter(globals())
TABLE_EXT = '.tbl'
log = logging.getLogger(__name__)
def _parse(s):
if hasattr(s, 'decode'):
s = s.decode('ascii')
if not s.startswith('#'):
return ast.literal_eval(s)
return None
@exporter.export
def read_colortable(fobj):
r"""Read colortable information from a file.
Reads a colortable, which consists of one color per line of the file, where
    a color can be one of: a tuple of 3 floats, a string with an HTML color name,
    or a string with an HTML hex color.
Parameters
----------
fobj : a file-like object
A file-like object to read the colors from
Returns
-------
List of tuples
A list of the RGB color values, where each RGB color is a tuple of 3 floats in the
range of [0, 1].
"""
ret = []
try:
for line in fobj:
literal = _parse(line)
if literal:
ret.append(mcolors.colorConverter.to_rgb(literal))
return ret
except (SyntaxError, ValueError) as e:
raise RuntimeError(f'Malformed colortable (bad line: {line})') from e
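# Example of file contents accepted by read_colortable() (illustrative sketch,
# not shipped data; one color per line, as described in the docstring above):
#
#     (0.0, 0.0, 1.0)
#     "red"
#     "#00FF00"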
def convert_gempak_table(infile, outfile):
r"""Convert a GEMPAK color table to one MetPy can read.
Reads lines from a GEMPAK-style color table file, and writes them to another file in
a format that MetPy can parse.
Parameters
----------
infile : file-like object
The file-like object to read from
outfile : file-like object
The file-like object to write to
"""
for line in infile:
if not line.startswith('!') and line.strip():
r, g, b = map(int, line.split())
outfile.write(f'({r / 255:f}, {g / 255:f}, {b / 255:f})\n')
class ColortableRegistry(dict):
r"""Manages the collection of color tables.
    Provides access to color tables, reads collections of files, and generates
matplotlib's Normalize instances to go with the colortable.
"""
def scan_resource(self, pkg, path):
r"""Scan a resource directory for colortable files and add them to the registry.
Parameters
----------
pkg : str
The package containing the resource directory
path : str
The path to the directory with the color tables
"""
try:
from importlib.resources import files as importlib_resources_files
except ImportError: # Can remove when we require Python > 3.8
from importlib_resources import files as importlib_resources_files
for entry in (importlib_resources_files(pkg) / path).iterdir():
if entry.suffix == TABLE_EXT:
with entry.open() as stream:
self.add_colortable(stream, entry.with_suffix('').name)
def scan_dir(self, path):
r"""Scan a directory on disk for color table files and add them to the registry.
Parameters
----------
path : str
The path to the directory with the color tables
"""
for entry in Path(path).glob('*' + TABLE_EXT):
if entry.is_file():
with entry.open() as fobj:
try:
self.add_colortable(fobj, entry.with_suffix('').name)
log.debug('Added colortable from file: %s', entry)
except RuntimeError:
# If we get a file we can't handle, assume we weren't meant to.
log.info('Skipping unparsable file: %s', entry)
def add_colortable(self, fobj, name):
r"""Add a color table from a file to the registry.
Parameters
----------
fobj : file-like object
The file to read the color table from
name : str
The name under which the color table will be stored
"""
self[name] = read_colortable(fobj)
self[name + '_r'] = self[name][::-1]
def get_with_steps(self, name, start, step):
r"""Get a color table from the registry with a corresponding norm.
Builds a `matplotlib.colors.BoundaryNorm` using `start`, `step`, and
the number of colors, based on the color table obtained from `name`.
Parameters
----------
name : str
            The name under which the color table is stored
start : float
The starting boundary
step : float
The step between boundaries
Returns
-------
`matplotlib.colors.BoundaryNorm`, `matplotlib.colors.ListedColormap`
The boundary norm based on `start` and `step` with the number of colors
from the number of entries matching the color table, and the color table itself.
"""
from numpy import arange
# Need one more boundary than color
num_steps = len(self[name]) + 1
boundaries = arange(start, start + step * num_steps, step)
return self.get_with_boundaries(name, boundaries)
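    # Worked example: for a table with 5 colors, get_with_steps(name, 0, 10)
    # yields boundaries [0, 10, 20, 30, 40, 50] (one more boundary than colors).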
def get_with_range(self, name, start, end):
r"""Get a color table from the registry with a corresponding norm.
Builds a `matplotlib.colors.BoundaryNorm` using `start`, `end`, and
the number of colors, based on the color table obtained from `name`.
Parameters
----------
name : str
            The name under which the color table is stored
start : float
The starting boundary
end : float
The ending boundary
Returns
-------
`matplotlib.colors.BoundaryNorm`, `matplotlib.colors.ListedColormap`
The boundary norm based on `start` and `end` with the number of colors
from the number of entries matching the color table, and the color table itself.
"""
from numpy import linspace
# Need one more boundary than color
num_steps = len(self[name]) + 1
boundaries = linspace(start, end, num_steps)
return self.get_with_boundaries(name, boundaries)
def get_with_boundaries(self, name, boundaries):
r"""Get a color table from the registry with a corresponding norm.
Builds a `matplotlib.colors.BoundaryNorm` using `boundaries`.
Parameters
----------
name : str
            The name under which the color table is stored
boundaries : array_like
The list of boundaries for the norm
Returns
-------
`matplotlib.colors.BoundaryNorm`, `matplotlib.colors.ListedColormap`
The boundary norm based on `boundaries`, and the color table itself.
"""
cmap = self.get_colortable(name)
return mcolors.BoundaryNorm(boundaries, cmap.N), cmap
def get_colortable(self, name):
r"""Get a color table from the registry.
Parameters
----------
name : str
            The name under which the color table is stored
Returns
-------
`matplotlib.colors.ListedColormap`
The color table corresponding to `name`
"""
return mcolors.ListedColormap(self[name], name=name)
registry = ColortableRegistry()
registry.scan_resource('metpy.plots', 'colortable_files')
registry.scan_dir(Path.cwd())
with exporter:
colortables = registry
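# Illustrative usage sketch (not part of the upstream module). The table name
# 'NWSReflectivity' is an assumption -- any key present in ``registry`` works.
def _example_norm_and_cmap(name='NWSReflectivity', start=5, step=5):
    """Return a (norm, cmap) pair suitable for, e.g., pcolormesh(..., norm=norm, cmap=cmap)."""
    return registry.get_with_steps(name, start, step)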
| bsd-3-clause |
amcamd/rocBLAS | scripts/performance/blas/performancereport.py | 1 | 28978 | #!/usr/bin/env python3
import argparse
from collections import OrderedDict
import os
import re
import sys
from matplotlib.ticker import (AutoMinorLocator)
sys.path.append('../../../clients/common/')
import rocblas_gentest as gt
import commandrunner as cr
# TODO: Should any of these ignored arguments be passed on?
IGNORE_YAML_KEYS = [
'KL',
'KU',
'incd',
'incb',
'alphai',
'betai',
'norm_check',
'unit_check',
'timing',
'algo',
'solution_index',
'flags',
'workspace_size',
'initialization',
'category',
'known_bug_platforms',
'name',
'c_noalias_d',
'samples',
'a_type',
'b_type',
'c_type',
'd_type',
'stride_x',
'stride_y',
'ldd',
'stride_a',
'stride_b',
'stride_c',
'stride_d',
]
REGULAR_YAML_KEYS = [
'batch_count',
'function',
'compute_type',
'incx',
'incy',
'alpha',
'beta',
'iters',
    # 'samples',  # TODO: Implement this functionality at a low level
'transA',
'transB',
'side',
'uplo',
'diag'
]
SWEEP_YAML_KEYS = [
'n',
'm',
'k',
'lda',
'ldb',
'ldc',
]
# If an argument is not relevant to a function, then its value is set to '*'.
# We cannot pass a '*' on to the benchmark command line, so when the value is
# '*' the flag needs to be removed entirely.
class StripStarsArgument(cr.ArgumentABC):
def __init__(self, flag):
cr.ArgumentABC.__init__(self)
self.flag = flag
def get_args(self):
if self._value is None:
return []
#raise RuntimeError('No value set for {}'.format(self.flag))
if self._value == '*': # If an asterisk is specified
return [] # Just ignore the flag entirely
return [self.flag, str(self._value)]
# TODO: handle this better
class IgnoreArgument(cr.ArgumentABC):
def __init__(self, flag):
cr.ArgumentABC.__init__(self)
self.flag = flag
def get_args(self):
return []
class RocBlasArgumentSet(cr.ArgumentSetABC):
def _define_consistent_arguments(self):
self.consistent_args['n' ] = StripStarsArgument('-n' )
self.consistent_args['m' ] = StripStarsArgument('-m' )
self.consistent_args['k' ] = StripStarsArgument('-k' )
self.consistent_args['batch_count' ] = StripStarsArgument('--batch_count' ) #
self.consistent_args['function' ] = StripStarsArgument('-f' ) #
self.consistent_args['compute_type' ] = StripStarsArgument('-r' ) # precision
self.consistent_args['incx' ] = StripStarsArgument('--incx' )
self.consistent_args['incy' ] = StripStarsArgument('--incy' )
self.consistent_args['alpha' ] = StripStarsArgument('--alpha' )
self.consistent_args['beta' ] = StripStarsArgument('--beta' )
self.consistent_args['iters' ] = StripStarsArgument('-i' ) #
self.consistent_args['lda' ] = StripStarsArgument('--lda' )
self.consistent_args['ldb' ] = StripStarsArgument('--ldb' )
self.consistent_args['ldc' ] = StripStarsArgument('--ldc' )
self.consistent_args['transA' ] = StripStarsArgument('--transposeA' )
self.consistent_args['transB' ] = StripStarsArgument('--transposeB' )
#self.consistent_args['initialization'] = StripStarsArgument('-initialization') # Unused?
self.consistent_args['side' ] = StripStarsArgument('--side' )
self.consistent_args['uplo' ] = StripStarsArgument('--uplo' )
self.consistent_args['diag' ] = StripStarsArgument('--diag' )
self.consistent_args['device' ] = cr.DefaultArgument('--device', 0 )
def _define_variable_arguments(self):
self.variable_args['output_file'] = cr.PipeToArgument()
def __init__(self, **kwargs):
cr.ArgumentSetABC.__init__(
self, **kwargs
)
def get_full_command(self, run_configuration):
exec_name = os.path.join(run_configuration.executable_directory, 'rocblas-bench')
if not os.path.exists(exec_name):
raise RuntimeError('Unable to find {}!'.format(exec_name))
#self.set('nsample', run_configuration.num_runs)
self.set('output_file', self.get_output_file(run_configuration))
return [exec_name] + self.get_args()
def collect_timing(self, run_configuration):
output_filename = self.get_output_file(run_configuration)
rv = {}
print('Processing {}'.format(output_filename))
if os.path.exists(output_filename):
lines = open(output_filename, 'r').readlines()
us_vals = []
gf_vals = []
bw_vals = []
gf_string = "rocblas-Gflops"
bw_string = "rocblas-GB/s"
us_string = "us"
for i in range(0, len(lines)):
if re.search(r"\b" + re.escape(us_string) + r"\b", lines[i]) is not None:
us_line = lines[i].strip().split(",")
index = [idx for idx, s in enumerate(us_line) if us_string in s][0] #us_line.index()
us_vals.append(float(re.split(r',\s*(?![^()]*\))', lines[i+1])[index]))
if gf_string in lines[i]:
gf_line = lines[i].split(",")
index = gf_line.index(gf_string)
gf_vals.append(float(re.split(r',\s*(?![^()]*\))', lines[i+1])[index]))
if bw_string in lines[i]:
bw_line = lines[i].split(",")
index = bw_line.index(bw_string)
bw_vals.append(float(re.split(r',\s*(?![^()]*\))', lines[i+1])[index]))
if len(us_vals) > 0 and data_type == 'time':
rv['Time (microseconds)'] = us_vals
if len(bw_vals) > 0 and data_type == 'bandwidth':
rv['Bandwidth (GB/s)'] = bw_vals
if len(gf_vals) > 0 and data_type == 'gflops':
rv['GFLOP/s'] = gf_vals
else:
print('{} does not exist'.format(output_filename))
return rv
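    # Illustrative sketch of the output the parser above expects (the exact
    # rocblas-bench column layout varies by function; these two lines are
    # hypothetical):
    #
    #     transA,transB,M,N,K,alpha,lda,beta,ldb,ldc,rocblas-Gflops,us
    #     N,N,1024,1024,1024,1,1024,0,1024,1024,9000.1,238.5
    #
    # For that pair of lines, the value under 'rocblas-Gflops' feeds the
    # 'GFLOP/s' entry and the value under 'us' feeds 'Time (microseconds)'.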
class YamlData:
def __init__(self, config_file):
self.config_file = config_file
self.test_cases = []
self.execute_run()
def reorder_data(self):
old_data = self.test_cases
new_data = []
names = []
for test in old_data:
name = test['function']
precision = test['compute_type']
side = test['side']
if (name,precision) not in names: # TODO: This will always be true because "side" is not in the tuple.
type = [ x for x in old_data if x['function']==name and x['compute_type'] == precision and x['side'] == side ]
new_data.append(type)
names.append((name,precision, side))
self.test_cases = new_data
#Monkey Patch
def write_test(self, test):
self.test_cases.append(test)
#Monkey Patch
def process_doc(self, doc):
"""Process one document in the YAML file"""
# Ignore empty documents
if not doc or not doc.get('Tests'):
return
# Clear datatypes and params from previous documents
gt.datatypes.clear()
gt.param.clear()
# Return dictionary of all known datatypes
gt.datatypes.update(gt.get_datatypes(doc))
# Arguments structure corresponding to C/C++ structure
gt.param['Arguments'] = type('Arguments', (gt.ctypes.Structure,),
{'_fields_': gt.get_arguments(doc)})
# Special names which get expanded as lists of arguments
gt.param['dict_lists_to_expand'] = doc.get('Dictionary lists to expand') or ()
# Lists which are not expanded
gt.param['lists_to_not_expand'] = doc.get('Lists to not expand') or ()
# Defaults
defaults = doc.get('Defaults') or {}
default_add_ons = {'m': -1, 'M': -1, 'n': -1, 'N': -1, 'k': -1, 'K': -1, 'lda': -1, 'ldb': -1, 'ldc': -1, 'LDA': -1, 'LDB': -1, 'LDC': -1, 'iters': 1, 'flops': '', 'mem': '', 'samples': 1, 'step_mult': 0}
defaults.update(default_add_ons)
# Known Bugs
gt.param['known_bugs'] = doc.get('Known bugs') or []
# Functions
gt.param['Functions'] = doc.get('Functions') or {}
# Instantiate all of the tests, starting with defaults
for test in doc['Tests']:
case = defaults.copy()
case.update(test)
gt.generate(case, gt.instantiate)
def import_data(self):
gt.args['includes'] = []
gt.args['infile'] = self.config_file
gt.write_test = self.write_test
for doc in gt.get_yaml_docs():
self.process_doc(doc)
def execute_run(self):
self.import_data()
self.reorder_data()
class RocBlasYamlComparison(cr.Comparison):
def __init__(self, test_yaml, data_type, **kwargs):
def get_function_prefix(compute_type):
if '32_r' in compute_type:
return 's'
elif '64_r' in compute_type:
return 'd'
elif '32_c' in compute_type:
return 'c'
elif '64_c' in compute_type:
return 'z'
elif 'bf16_r' in compute_type:
return 'bf'
elif 'f16_r' in compute_type:
return 'h'
else:
print('Error - Cannot detect precision preFix: ' + compute_type)
cr.Comparison.__init__(self,
description=get_function_prefix(test_yaml[0]['compute_type']) + test_yaml[0]['function'].split('_')[0] + ' Performance',
**kwargs)
for test in test_yaml:
argument_set = RocBlasArgumentSet()
all_inputs = {key:test[key] for key in test if not key in IGNORE_YAML_KEYS} # deep copy and cast to dict
# regular keys have a direct mapping to the benchmark executable
for key in REGULAR_YAML_KEYS:
argument_set.set(key, all_inputs.pop(key))
            # step_size and step_mult are special; they determine how to sweep variables
            step_size = int(all_inputs.pop('step_size')) if 'step_size' in all_inputs else 10  # backwards compatible default
step_mult = (int(all_inputs.pop('step_mult')) == 1) if 'step_mult' in all_inputs else False
mem = all_inputs.pop('mem')
flops = all_inputs.pop('flops')
self.mem = mem
self.flops = flops
if step_size == 1 and step_mult:
raise ValueError('Cannot increment by multiplying by one.')
sweep_lists = {}
for key in SWEEP_YAML_KEYS:
key_min = int(all_inputs.pop(key))
key_max = int(all_inputs.pop(key.upper()))
key_values = []
while key_min <= key_max:
key_values.append(key_min)
if(key_min == -1):
break
key_min = key_min*step_size if step_mult else key_min+step_size
sweep_lists[key] = key_values
sweep_lengths = {key:len(sweep_lists[key]) for key in sweep_lists}
max_sweep_length = max(sweep_lengths.values())
for key in sweep_lists:
if sweep_lists[key][0] != -1:
sweep_lists[key] += [sweep_lists[key][sweep_lengths[key]-1]] * (max_sweep_length - sweep_lengths[key])
sweep_lengths[key] = max_sweep_length
for sweep_idx in range(max_sweep_length):
sweep_argument_set = argument_set.get_deep_copy()
for key in sweep_lists:
if sweep_lengths[key] == max_sweep_length:
sweep_argument_set.set(key, sweep_lists[key][sweep_idx])
self.add(sweep_argument_set)
if len(all_inputs) > 0:
print('WARNING - The following values were unused: {}'.format(all_inputs))
self.data_type = data_type
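    # Worked example of the sweep expansion in __init__ (values hypothetical):
    # n: 128, N: 1024 with step_mult: 1 and step_size: 2 sweeps 'n' over
    # [128, 256, 512, 1024]; n: 10, N: 40 with the default step_size of 10 and
    # no step_mult sweeps over [10, 20, 30, 40]. A value of -1 is kept as a
    # single entry rather than swept.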
def write_docx_table(self, document):
if len(self.argument_sets) > 0:
argument_diff = cr.ArgumentSetDifference(self.argument_sets, ignore_keys=self._get_sweep_keys())
differences = argument_diff.get_differences()
is_a_comparison = len(differences) > 0
document.add_paragraph(
('For all runs, ``' if is_a_comparison else 'Command: ')
+ ' '.join(self.argument_sets[0].get_args(require_keys=argument_diff.get_similarities()))
+("'' is held constant." if is_a_comparison else '')
)
# if is_a_comparison:
# header_row = ['label'] + differences
# num_columns = len(header_row)
# sorted_argument_sets = self.sort_argument_sets(isolate_keys=self._get_sweep_keys())
# num_rows = len(sorted_argument_sets) + 1
# table_style = 'Colorful Grid' if self.user_args.docx_template is None else None
# table = document.add_table(num_rows, num_columns, style=table_style)
# row_idx = 0
# for col_idx, data in enumerate(header_row):
# table.cell(row_idx, col_idx).text = data
# for argument_set_hash, argument_sets in sorted_argument_sets.items():
# if len(argument_sets) > 0:
# row_idx += 1
# argument_set = argument_sets[0]
# row = [argument_set_hash]
# for key in differences:
# argument = argument_set.get(key)
# row.append(argument.get_value() if argument.is_set() else 'DEFAULT')
# for col_idx, data in enumerate(row):
# table.cell(row_idx, col_idx).text = str(data)
def write_latex_table(self, latex_module):
if len(self.argument_sets) > 0:
argument_diff = cr.ArgumentSetDifference(self.argument_sets, ignore_keys=self._get_sweep_keys())
differences = argument_diff.get_differences()
is_a_comparison = len(differences) > 0
latex_module.append(
('For all runs, ``' if is_a_comparison else 'Command: ')
+ ' '.join(self.argument_sets[0].get_args(require_keys=argument_diff.get_similarities()))
+("'' is held constant." if is_a_comparison else '')
)
# if is_a_comparison:
# with latex_module.create(cr.pylatex.Center()) as centered:
# tabu_format = 'r|' + ''.join(['c' for key in differences])
# with centered.create(cr.pylatex.Tabu(tabu_format)) as data_table:
# header_row = ['label'] + differences
# data_table.add_row(header_row, mapper=[cr.pylatex.utils.bold])
# data_table.add_hline()
# sorted_argument_sets = self.sort_argument_sets(isolate_keys=self._get_sweep_keys())
# for argument_set_hash, argument_sets in sorted_argument_sets.items():
# if len(argument_sets) > 0:
# argument_set = argument_sets[0]
# row = [argument_set_hash]
# results = argument_set.collect_timing(run_configuration)
# for metric_label in results:
# if not metric_label in y_list_by_metric:
# y_list_by_metric[metric_label] = []
# y_list_by_metric[metric_label].extend(results[metric_label])
# # For each metric, add a set of bars in the bar chart.
# for metric_label, y_list in y_list_by_metric.items():
# y_scatter_by_group[group_label].extend(sorted(y_list))
# for key in differences:
# argument = argument_set.get(key)
# row.append(argument.get_value() if argument.is_set() else 'DEFAULT')
# data_table.add_row(row)
data_type_classes = {}
class TimeComparison(RocBlasYamlComparison):
def __init__(self, **kwargs):
RocBlasYamlComparison.__init__(self, data_type='time', **kwargs)
# data_type_classes['time'] = TimeComparison
class FlopsComparison(RocBlasYamlComparison):
def __init__(self, **kwargs):
RocBlasYamlComparison.__init__(self, data_type='gflops', **kwargs)
def plot(self, run_configurations, axes):
num_argument_sets = len(self.argument_sets)
if num_argument_sets == 0:
return
sorted_argument_sets = self.sort_argument_sets(isolate_keys=[]) # No sort applied, but labels provided
argument_diff = cr.ArgumentSetDifference(self.argument_sets, ignore_keys=self._get_sweep_keys())
differences = argument_diff.get_differences()
test = []
xLabel = []
for key in differences:
xLabel.append(key)
for argument_set_hash, argument_sets in sorted_argument_sets.items():
argument_set = argument_sets[0]
precision = argument_set.get("compute_type").get_value()
function = argument_set.get("function").get_value()
for key in differences:
argument = argument_set.get(key)
test.append(argument.get_value() if argument.is_set() else 'DEFAULT')
break;
grouped_run_configurations = run_configurations.group_by_label()
num_groups = len(grouped_run_configurations)
metric_labels = [key for key in self.argument_sets[0].collect_timing(run_configurations[0])]
num_metrics = len(metric_labels)
if num_metrics == 0:
return
# loop over independent outputs
y_scatter_by_group = OrderedDict()
for group_label, run_configuration_group in grouped_run_configurations.items():
# x_scatter_by_group[group_label] = []
y_scatter_by_group[group_label] = []
# loop over argument sets that differ other than the swept variable(s)
for subset_label, partial_argument_sets in sorted_argument_sets.items():
if len(partial_argument_sets) != 1:
raise ValueError('Assumed that sorting argument sets with no keys has a single element per sort.')
argument_set = partial_argument_sets[0]
y_list_by_metric = OrderedDict() # One array of y values for each metric
# loop over number of coarse grain runs and concatenate results
for run_configuration in run_configuration_group:
results = argument_set.collect_timing(run_configuration)
for metric_label in results:
if not metric_label in y_list_by_metric:
y_list_by_metric[metric_label] = []
y_list_by_metric[metric_label].extend(results[metric_label])
# For each metric, add a set of bars in the bar chart.
for metric_label, y_list in y_list_by_metric.items():
y_scatter_by_group[group_label].extend(sorted(y_list))
for group_label, run_configuration_group in grouped_run_configurations.items():
for run_configuration in run_configuration_group:
mclk = run_configuration.load_specifications()['ROCm Card1']["Start mclk"].split("Mhz")[0]
sclk = run_configuration.load_specifications()['ROCm Card1']["Start sclk"].split("Mhz")[0]
theoMax = 0
precisionBits = int(re.search(r'\d+', precision).group())
if(function == 'gemm' and precisionBits == 32): #xdlops
theoMax = float(sclk)/1000.00 * 256 * 120 #scaling to appropriate precision
elif(function == 'trsm' or function == 'gemm'): #TODO better logic to decide memory bound vs compute bound
theoMax = float(sclk)/1000.00 * 128 * 120 * 32.00 / precisionBits #scaling to appropriate precision
elif self.flops and self.mem:
try:
n=100000
flops = eval(self.flops)
mem = eval(self.mem)
theoMax = float(mclk) / float(eval(self.mem)) * eval(self.flops) * 32 / precisionBits / 4
except:
print("flops and mem equations produce errors")
if theoMax:
theoMax = round(theoMax)
x_co = (test[0], test[len(test)-1])
y_co = (theoMax, theoMax)
axes.plot(x_co, y_co, label = "Theoretical Peak Performance: "+str(theoMax)+" GFLOP/s")
for group_label in y_scatter_by_group:
axes.scatter(
# x_bar_by_group[group_label],
test,
y_scatter_by_group[group_label],
# gap_scalar * width,
color='black',
# label = group_label,
)
axes.plot(
# x_scatter_by_group[group_label],
test,
y_scatter_by_group[group_label],
# 'k*',
'-ok',
)
axes.xaxis.set_minor_locator(AutoMinorLocator())
axes.yaxis.set_minor_locator(AutoMinorLocator())
axes.set_ylabel(metric_labels[0] if len(metric_labels) == 1 else 'Time (s)' )
axes.set_xlabel('='.join(xLabel))
return True
class BandwidthComparison(RocBlasYamlComparison):
def __init__(self, **kwargs):
RocBlasYamlComparison.__init__(self, data_type='bandwidth', **kwargs)
def plot(self, run_configurations, axes):
num_argument_sets = len(self.argument_sets)
if num_argument_sets == 0:
return
sorted_argument_sets = self.sort_argument_sets(isolate_keys=[]) # No sort applied, but labels provided
argument_diff = cr.ArgumentSetDifference(self.argument_sets, ignore_keys=self._get_sweep_keys())
differences = argument_diff.get_differences()
test = []
xLabel = []
for key in differences:
xLabel.append(key)
for argument_set_hash, argument_sets in sorted_argument_sets.items():
argument_set = argument_sets[0]
precision = argument_set.get("compute_type").get_value()
function = argument_set.get("function").get_value()
for key in differences:
argument = argument_set.get(key)
test.append(argument.get_value() if argument.is_set() else 'DEFAULT')
break;
grouped_run_configurations = run_configurations.group_by_label()
num_groups = len(grouped_run_configurations)
metric_labels = [key for key in self.argument_sets[0].collect_timing(run_configurations[0])]
num_metrics = len(metric_labels)
if num_metrics == 0:
return
# loop over independent outputs
y_scatter_by_group = OrderedDict()
for group_label, run_configuration_group in grouped_run_configurations.items():
# x_scatter_by_group[group_label] = []
y_scatter_by_group[group_label] = []
# loop over argument sets that differ other than the swept variable(s)
for subset_label, partial_argument_sets in sorted_argument_sets.items():
if len(partial_argument_sets) != 1:
raise ValueError('Assumed that sorting argument sets with no keys has a single element per sort.')
argument_set = partial_argument_sets[0]
y_list_by_metric = OrderedDict() # One array of y values for each metric
# loop over number of coarse grain runs and concatenate results
for run_configuration in run_configuration_group:
results = argument_set.collect_timing(run_configuration)
for metric_label in results:
if not metric_label in y_list_by_metric:
y_list_by_metric[metric_label] = []
y_list_by_metric[metric_label].extend(results[metric_label])
# For each metric, add a set of bars in the bar chart.
for metric_label, y_list in y_list_by_metric.items():
y_scatter_by_group[group_label].extend(sorted(y_list))
for group_label, run_configuration_group in grouped_run_configurations.items():
for run_configuration in run_configuration_group:
# Reference: MI-100 theoretical memory bandwidth by default
tmb_MI100 = 1200
# Reference: radeon 7 theoretical memory bandwidth by default
tmb_radeon7 = 1000
theoMax = 0
precisionBits = int(re.search(r'\d+', precision).group())
if(function == 'gemm' and precisionBits == 32): #xdlops
theoMax = tmb_MI100 #scaling to appropriate precision
elif(function == 'trsm' or function == 'gemm'): #TODO better logic to decide memory bound vs compute bound
theoMax = tmb_MI100 #scaling to appropriate precision
elif(function == 'copy' and precisionBits == 32):
theoMax = tmb_MI100
elif(function == 'swap' and precisionBits == 32):
theoMax = tmb_MI100
elif self.flops and self.mem:
try:
theoMax = tmb_MI100
except:
print("flops and mem equations produce errors")
if theoMax:
theoMax = round(theoMax)
x_co = (test[0], test[len(test)-1])
y_co = (theoMax, theoMax)
axes.plot(x_co, y_co, label = "Theoretical Peak Performance: "+str(theoMax)+"GB/s")
for group_label in y_scatter_by_group:
axes.scatter(
# x_bar_by_group[group_label],
test,
y_scatter_by_group[group_label],
# gap_scalar * width,
color='black',
# label = group_label,
)
axes.plot(
# x_scatter_by_group[group_label],
test,
y_scatter_by_group[group_label],
# 'k*',
'-ok',
)
axes.xaxis.set_minor_locator(AutoMinorLocator())
axes.yaxis.set_minor_locator(AutoMinorLocator())
axes.set_ylabel('Bandwidth (GB/s)')
axes.set_xlabel('='.join(xLabel))
return True
data_type_classes['gflops'] = FlopsComparison
data_type_classes['bandwidth'] = BandwidthComparison
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-N', '--num-runs', default=10, type=int,
help='Number of times to run each test.')
parser.add_argument('--data-types', default=data_type_classes.keys(), nargs='+',
choices = data_type_classes.keys(),
help='Types of data to generate plots for.')
parser.add_argument('-I', '--input-yaml', required=True,
help='rocBLAS input yaml config.')
user_args = cr.parse_input_arguments(parser)
command_runner = cr.CommandRunner(user_args)
command_runner.setup_system()
#load yaml then create fig for every test
with open(user_args.input_yaml, 'r') as f:
data = YamlData(f)
f.close()
comparisons = []
#setup tests sorted by their respective figures
for test_yaml in data.test_cases:
for data_type in user_args.data_types:
print(data_type)
data_type_cls = data_type_classes[data_type]
comparison = data_type_cls(test_yaml = test_yaml)
comparisons.append(comparison)
command_runner.add_comparisons(comparisons)
command_runner.main()
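    # Example invocation (sketch; any additional flags consumed by
    # commandrunner.parse_input_arguments are not shown, and the YAML filename
    # is hypothetical):
    #
    #     ./performancereport.py -I gemm_sweep.yaml -N 3 --data-types gflops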
| mit |
yonglehou/scikit-learn | examples/model_selection/grid_search_digits.py | 227 | 2665 | """
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This example shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.grid_search.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
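# Note: in scikit-learn >= 0.18 these utilities live in sklearn.model_selection
# (train_test_split, GridSearchCV), and GridSearchCV exposes cv_results_ in
# place of grid_scores_; this example targets the older API imported above.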
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the image, to
# turn the data into a (samples, feature) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
scoring='%s_weighted' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
for params, mean_score, scores in clf.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
| bsd-3-clause |
wasade/american-gut-web | amgut/lib/locale_data/american_gut.py | 1 | 136152 | #!/usr/bin/env python
from __future__ import division
from amgut.lib.config_manager import AMGUT_CONFIG
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The American Gut Project Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
# Any media specific localizations
HELP_EMAIL = "[email protected]"
_SITEBASE = ''
media_locale = {
'SITEBASE': _SITEBASE,
'LOGO': _SITEBASE + '/static/img/ag_logo.jpg',
'ANALYTICS_ID': 'UA-55353353-1',
'LATITUDE': 39.83,
'LONGITUDE': -99.89,
'ZOOM': 4,
'STEPS_VIDEO': "http://player.vimeo.com/video/63542787",
'ADD_PARTICIPANT': 'http://player.vimeo.com/video/63931218',
'ADD_PARTICIPANT_IMG_1': _SITEBASE + "/static/img/add_participant.png",
'ADD_PARTICIPANT_IMG_MENU': _SITEBASE + "/static/img/add_participant_menu.png",
'LOG_SAMPLE_OPTS': _SITEBASE + "/static/img/log_sample_options.png",
'ADD_SAMPLE_HIGHLIGHT': _SITEBASE + "/static/img/add_sample_highlight.png",
'ADD_SAMPLE_OVERVIEW': _SITEBASE + "/static/img/add_sample_overview.png",
'FAQ_AMBIGUOUS_PASS': _SITEBASE + '/static/img/creds_example.png',
'SAMPLE_BARCODE': _SITEBASE + '/static/img/sample_barcode.jpg',
'SWAB_HANDLING': 'http://player.vimeo.com/video/62393487',
'HELP_EMAIL': HELP_EMAIL,
'PROJECT_TITLE': AMGUT_CONFIG.project_name,
'FAVICON': _SITEBASE + '/static/img/favicon.ico',
'FUNDRAZR_URL': 'https://fundrazr.com/campaigns/4Tqx5',
'NAV_PARTICIPANT_RESOURCES': 'Participant resources',
'NAV_HOME': 'Home',
'NAV_MICROBIOME_101': '%s 101' % AMGUT_CONFIG.project_shorthand,
'NAV_FAQ': 'FAQ',
'NAV_MICROBIOME_FAQ': 'Human Microbiome FAQ',
'NAV_ADDENDUM': 'How do I interpret my results?',
'NAV_PRELIM_RESULTS': 'Preliminary results!',
'NAV_CHANGE_PASSWORD': 'Change Password',
'NAV_CONTACT_US': 'Contact Us',
'NAV_LOGOUT': 'Log out',
'NAV_HUMAN_SAMPLES': 'Human Samples',
'NAV_RECEIVED': '(Received)',
'NAV_ADD_HUMAN': 'Add Human Source',
'NAV_ANIMAL_SAMPLES': 'Animal Samples',
'NAV_ADD_ANIMAL': 'Add Animal Source',
'NAV_ENV_SAMPLES': 'Environmental Samples',
'NAV_LOG_SAMPLE': 'Log Sample',
'NAV_JOIN_PROJECT': 'Join The Project',
'NAV_KIT_INSTRUCTIONS': 'Kit Instructions',
'NAV_PARTICIPANT_LOGIN': 'Participant Log In',
'NAV_FORGOT_KITID': 'I forgot my kit ID',
'NAV_INTERNATIONAL': 'International Shipping',
'NAV_FORGOT_PASSWORD': 'I forgot my password',
'ADDENDUM_CERT_TITLE': _SITEBASE + '/static/img/Michael_Pollan_mod-01.png',
'ADDENDUM_CERT_NAME': _SITEBASE + '/static/img/Michael_Pollan_mod-01b.png',
'ADDENDUM_CERT_HEADER': _SITEBASE + '/static/img/Michael_Pollan_mod-02.png',
'ADDENDUM_CERT_BARCHART': _SITEBASE + '/static/img/Michael_Pollan_mod-11.png',
'ADDENDUM_CERT_BARCHART_LEGEND': _SITEBASE + '/static/img/Michael_Pollan_mod-12.png',
'ADDENDUM_CERT_ABUNDANT_MICROBES': _SITEBASE + '/static/img/Michael_Pollan_mod-13.png',
'ADDENDUM_CERT_ENRICHED_MICROBES': _SITEBASE + '/static/img/Michael_Pollan_mod-14.png',
'ADDENDUM_CERT_RARE_MICROBES': _SITEBASE + '/static/img/Michael_Pollan_mod-15.png',
'ADDENDUM_CERT_HEADER_PCOA': _SITEBASE + '/static/img/Michael_Pollan_mod-03.png',
'ADDENDUM_CERT_PCOA_LEGEND': _SITEBASE + '/static/img/Michael_Pollan_mod-04.png',
'ADDENDUM_CERT_PCOA_BODYSITES': _SITEBASE + '/static/img/Michael_Pollan_mod-08.png',
'ADDENDUM_CERT_PCOA_AGES_POP': _SITEBASE + '/static/img/Michael_Pollan_mod-09.png',
'ADDENDUM_CERT_PCOA_AG_POPULATION': _SITEBASE + '/static/img/Michael_Pollan_mod-10.png',
'ADDENDUM_TAX_BARCHART': _SITEBASE + '/static/img/TaxFig.png',
'ADDENDUM_PCOA_BODYSITES': _SITEBASE + '/static/img/PCoA1.png',
'ADDENDUM_PCOA_AGES_POPS': _SITEBASE + '/static/img/PCoA2.png',
'ADDENDUM_PCOA_AG_POPULATION': _SITEBASE + '/static/img/PCoA3.png',
'PORTAL_DIET_QUESTIONS': _SITEBASE + '/static/img/diet_questions.png',
'PORTAL_SHIPPING': _SITEBASE + '/static/img/shipping.png',
'EMAIL_ERROR': "There was a problem sending your email. Please contact us directly at <a href='mailto:%(help_email)s'>%(help_email)s</a>" % {'help_email': HELP_EMAIL},
'EMAIL_SENT': 'Your message has been sent. We will reply shortly',
'SHIPPING_ADDRESS': "American Gut Project<br>Knight Lab, JSCBB<br>596 UCB<br>Boulder, CO 80309",
}
_HANDLERS = {
'JUVENILE_CONSENT_EXPECTED': "We are expecting a manual consent form for the juvenile user (%s)",
'PARTICIPANT_EXISTS': 'Participant %s already exists!',
'SUCCESSFULLY_ADDED': "Successfully added %s!",
'AUTH_REGISTER_SUBJECT': "%(project_shorthand)s Verification Code" % {'project_shorthand': AMGUT_CONFIG.project_shorthand},
    'AUTH_REGISTER_PGP': "\n\nFor the PGP cohort, we are requesting that you collect one sample from each of the following sites:\n\nLeft hand\nRight hand\nForehead\nMouth\nFecal\n\nThis is important to ensure that we have the same types of samples for all PGP participants which, in turn, could be helpful in downstream analysis when looking for relationships between the microbiome and the human genome.\n\n",
    'AUTH_REGISTER_BODY': "Thank you for registering with the %(project_name)s! Your verification code is:\n\n{0}\n\nYou will need this code to verify your kit on the %(project_shorthand)s website. To get started, please log into:\n\nhttp://microbio.me/BritishGut\n\nEnter the kit_id and password found inside your kit, verify the contents of your kit, and enter the verification code found in this email.{1}\n\nSincerely,\nThe %(project_shorthand)s Team" % {'project_shorthand': AMGUT_CONFIG.project_shorthand, 'project_name': AMGUT_CONFIG.project_name},
'KIT_REG_SUCCESS': 'Kit registered successfully.',
'INVALID_KITID': "Invalid Kit ID or Password",
'ADD_KIT_ERROR': "Could not add kit to database. Did you hit the back button while registering and press 'register user' again?",
'ADD_BARCODE_ERROR': "Could not add barcode to database. Did you hit the back button while registering and press 'register user' again?",
'CHANGE_PASS_BODY': 'This is a courtesy email to confirm that you have changed your password for your kit with ID %s. If you did not request this change, please email us immediately at {0}'.format(media_locale['HELP_EMAIL']),
'CHANGE_PASS_SUBJECT': '%(project_shorthand)s Password Reset' % {'project_shorthand': AMGUT_CONFIG.project_shorthand},
    'RESET_PASS_BODY': 'The password on American Gut Kit ID %s has been reset. Please click the link below within two hours.\nhttp://microbio.me/americangut/change_pass_verify/?email=%s;kitid=%s;passcode=%s',
'MINOR_PARENTAL_BODY': "Thank you for your interest in this study. Because of your status as a minor, we will contact you within 24 hours to verify parent/guardian consent.",
'MESSAGE_SENT': "Your message has been sent. We will reply shortly",
    'KIT_IDS_BODY': 'Your {1} Kit IDs are %s. You are receiving this email because you requested your Kit ID from the {1} web page. If you did not request your Kit ID, please email {0}. Thank you,\n The {1} Team\n'.format(media_locale['HELP_EMAIL'], AMGUT_CONFIG.project_shorthand),
'KIT_IDS_SUBJECT': '%(project_shorthand)s Kit ID' % {'project_shorthand': AMGUT_CONFIG.project_shorthand},
'BARCODE_ERROR': "ERROR: No barcode was requested"
}
# Template specific dicts
_FAQ = {
'FAQ_HEADER': "%(shorthand)s FAQ" % {"shorthand": AMGUT_CONFIG.project_shorthand},
'LOG_IN_WHAT_NOW_ANS_1': 'You need to follow the add participant workflow. Click on the "Add Source & Survey" tab located at the top of the page.',
'INFORMATION_IDENTIFY_ME': 'Can data describing my gut microbiome be used to identify me or a medical condition I have?',
'LOG_IN_WHAT_NOW_ANS_3': 'You can log a sample by clicking the "Log Sample" link in the menu. If you do not see the "Log Sample" link, then all of your barcodes have been assigned.',
'PARTICIPATE_WITH_DIAGNOSIS': 'Can I participate in the project if I am diagnosed with ...?',
    'LOG_IN_WHAT_NOW_ANS_5': 'When adding a sample, please be sure to select the barcode that matches the barcode on the sampling tube of the sample that you are logging',
'TAKES_SIX_MONTHS': 'Does it really take up to three months to get my results?',
'HOW_CHANGE_GUT': 'How can I change my gut microbiome?',
'BETTER_OR_WORSE': 'How can I tell if my gut microbiome is better or worse than other people in my category?',
'ONLY_FECAL_RESULTS_ANS': 'We have only sent out results for fecal samples and are in the process of evaluating how best to present the other sample types. Please see <a href="#faq12">the previous question </a>',
'DIFFERENT_WHATS_WRONG_WITH_ME_ANS': 'No! Your gut microbiome is as unique as your fingerprint so you should expect to see some differences. Many factors can affect your gut microbiome, and any differences you see are likely to be the result of one of these factors. Maybe your diet is different than most people your age. Maybe you just traveled somewhere exotic. Different does not necessarily mean bad.',
'WHEN_RESULTS_NON_FECAL_ANS': 'The vast majority of the samples we\'ve received are fecal, which was why we prioritized those samples. Much of the analysis and results infrastructure we\'ve put in place is applicable to other sample types, but we do still need to assess what specific representations of the data make the most sense to return to participants. We apologize for the delay.',
'FIND_DETAILED_INFO': 'Where can I find more detailed information about my sample?',
'ADD_PARTICIPANT': '<a href="%(add_participant_vid)s">%(shorthand)s - How to Add a Participant</a> from <a href="http://vimeo.com/user16100300">shelley schlender</a> on <a href="http://vimeo.com">Vimeo</a>.' % {"shorthand": AMGUT_CONFIG.project_shorthand, 'add_participant_vid': media_locale["ADD_PARTICIPANT"]},
'PASSWORD_DOESNT_WORK': "My password doesn't work!",
'COMBINE_RESULTS': 'My whole family participated, can we combine the results somehow?',
'PASSWORD_DOESNT_WORK_ANS': '<p>The passwords have some ambiguous characters in them, so we have this guide to help you decipher which characters are in your password.</p>'
'<p class="ambig">abcdefghijklmnopqrstuvwxyz<br>ABCDEFGHIJKLMNOPQRSTUVWXYZ<br>1234567890<br>1 = the number 1<br>l = the letter l as in Lima<br>0 = the number 0<br>O = the letter O as in Oscar<br>g = the letter g as in golf<br>q = the letter q as in quebec</p>',
'HANDLING_SWABS': '%(shorthand)s - Handling Your SWABS</a> from' % {"shorthand": AMGUT_CONFIG.project_shorthand},
'LOG_IN_WHAT_NOW_ANS_4': 'The generic add sample page looks like this:',
'RAW_DATA_ANS_2': 'Processed sequence data and open-access descriptions of the bioinformatic processing can be found at our <a href="https://github.com/qiime/American-Gut">Github repository</a>.</p>'
'<p>Sequencing of %(shorthand)s samples is an on-going project, as are the bioinformatic analyses. These resources will be updated as more information is added and as more open-access descriptions are finalized.' % {"shorthand": AMGUT_CONFIG.project_shorthand},
'RAW_DATA_ANS_1': '<P>The raw data can be fetched from the <a href=http://www.ebi.ac.uk/>European Bioinformatics Institute</a>. EBI is part of <a href=http://www.insdc.org/>The International Nucleotide Sequence Database Collaboration</a> and is a public warehouse for sequence data. The deposited %(project)s accessions so far are:<ol><li style="list-style-type:square"><a href="http://www.ebi.ac.uk/ena/data/view/ERP003819&display=html">ERP003819</a></li><li style="list-style-type:square"><a href="http://www.ebi.ac.uk/ena/data/view/ERP003822&display=html">ERP003822</a></li><li style="list-style-type:square"><a href="http://www.ebi.ac.uk/ena/data/view/ERP003820&display=html">ERP003820</a></li><li style="list-style-type:square"><a href="http://www.ebi.ac.uk/ena/data/view/ERP003821&display=html">ERP003821</a></li><li style="list-style-type:square"><a href="http://www.ebi.ac.uk/ena/data/view/ERP005367&display=html">ERP005367</a></li><li style="list-style-type:square"><a href="http://www.ebi.ac.uk/ena/data/view/ERP005366&display=html">ERP005366</a></li><li style="list-style-type:square"><a href="http://www.ebi.ac.uk/ena/data/view/ERP005361&display=html">ERP005361</a></li><li style="list-style-type:square"><a href="http://www.ebi.ac.uk/ena/data/view/ERP005362&display=html">ERP005362</a></li></ol>' % {"project": AMGUT_CONFIG.project_name},
'BETTER_OR_WORSE_ANS': 'Right now, you can\'t. We\'re still trying to understand what constitutes a normal or average gut microbiome, and we have a lot to learn about the functions of many of the microbes that inhabit the gut. Therefore, it\'s tough to know what combinations of microbes are best for nutrition and health. That\'s one reason collecting data from so many people is important - hopefully we can start to learn more about this.',
'LOOK_BELOW': "If you're still experiencing issues, look for your problem in the FAQ below",
'PASSWORD_SAME_VERIFICATION_ANS': 'No. Your <strong>password</strong> is printed on the sheet that you received with your kit in the mail. That sheet looks like this:</p>'
'<img src="%(FAQ_AMBIGUOUS_PASS)s"/><p>Your <strong>verification code</strong> is emailed to you. Look for the email: <br /><br /><strong>FROM:</strong> %(project)s (%(help_email)s)<br /><strong>SUBJECT:</strong> %(shorthand)s Kit ID & Verification Code' % {"shorthand": AMGUT_CONFIG.project_shorthand, "project": AMGUT_CONFIG.project_name, "FAQ_AMBIGUOUS_PASS": media_locale['FAQ_AMBIGUOUS_PASS'], 'help_email': media_locale['HELP_EMAIL']},
'TAKES_SIX_MONTHS_ANS': 'Yes. It takes about eight weeks for extractions, eight weeks for the remainder of the processing, and two weeks to do the actual sequencing. This is before any analysis and if everything goes as planned, with no delays - equipment down, run failures, reagents or other consumables back ordered. Things do sometimes go wrong, so we say up to three months.',
'PARTICIPATE_WITH_DIAGNOSIS_ANS': 'Of course! The only exclusion criteria are: you must be more than 3 months old and cannot be in prison. Please keep in mind that, for legal and ethical reasons, the %(project)s does not provide medically actionable results or advice.' % {"project": AMGUT_CONFIG.project_name},
'HOW_PROCESS_SAMPLES': 'How are the samples and data processed?',
'WHO_MICHAEL_POLLAN_ANS': 'Michael Pollan is a New York Times Best Seller for his books on diet and nutrition. Further information about Michael can be found <a href="http://michaelpollan.com/">here</a>.',
'WHO_MICHAEL_POLLAN': 'Who is Michael Pollan?',
'HOW_CHANGE_GUT_ANS': 'Although we still don\'t have a predictable way to change the gut microbiome in terms of increasing or decreasing the abundances of specific bacteria, we do know that a variety of factors influence gut microbial community composition. Diet is a major factor affecting the gut microbiome so by changing your diet, you may be able to affect your gut microbiome. We still don\'t fully understand probiotics but know that they can influence your gut microbiome while you are actively taking them. Factors such as stress can also influence the gut microbiome. However, it is important to remember that there are factors we can\'t change, such as age or genetics, that can affect the gut microbiome.',
'RAW_DATA': 'How can I get the raw data?',
'WATCH_VIDEOS': "Watch these helpful videos about what to do once you've received your kit!",
'INTRODUCTION_BEGINNING': '<a href="http://www.robrdunn.com">Rob Dunn</a> has provided this excellent introduction to some of the basics that every curious reader should check out!<br/> <br/>Rob is the author of the <a href="http://www.yourwildlife.org/the-wild-life-of-our-bodies/">Wild Life of Our Bodies</a>. He is an evolutionary biologist and writer at North Carolina State University. For more about your gut and all of your other parts, read more from Rob at <a href="http://www.robrdunn.com">www.robrdunn.com</a></p>'
'',
'INFORMATION_IDENTIFY_ME_ANS': 'No. First, all of your personal information has been de-identified in our database as mandated by institutional guidelines. Second, although each person has a unique gut microbiome, many of the unique qualities are at the species or strain level of bacteria. Our sequencing methods currently do not allow us to describe your gut microbiome in that much detail. Finally, for most medical conditions, there are no known, predictable patterns in gut microbial community composition. Research simply hasn\'t gotten that far yet.</p>'
'<p>We should also mention that since we are only interested in your microbes, we do not sequence human genomic DNA in our typical analyses. Where it is possible for human DNA to be sequenced (e.g., the Beyond Bacteria kits), we remove the human DNA using the same bioinformatics approaches undertaken in the NIH-funded Human Microbiome Project and approved by NIH bioethicists. Additionally, there is so little human DNA in fecal, skin and mucus samples that the chances of us being able to sequence your entire human genome are almost none, even if we tried.',
'FECAL_NO_RESULTS_ANS': 'On any given sequencing run (not just the %(shorthand)s), a small percentage of the samples fail for unknown reasons -- our methods are good but not perfect. This is one of the reasons the sample kits have two Q-tips. It allows us to perform a second microbial DNA extraction and re-sequence if the first attempt failed. We will be doing this for all of the samples that failed. If there was a technical problem with the sample itself (e.g. not enough microbes on the swab) that inhibits us from producing data for you, we will be re-contacting you about collecting another sample.' % {"shorthand": AMGUT_CONFIG.project_shorthand},
'MULTIPLE_KITS_DIFFERENT_TIMES_ANS': 'For best results, we recommend that you mail each sample within 24 hours of collection.',
'STEPS_TO_FOLLOW': '<a href="%(video)s">%(shorthand)s - Steps to Follow When Your Kit Arrives</a> from <a href="http://vimeo.com/user16100300">shelley schlender</a> on <a href="http://vimeo.com">Vimeo</a>.' % {"shorthand": AMGUT_CONFIG.project_shorthand, "video": media_locale["STEPS_VIDEO"]},
'WHY_TWO_SWABS': 'Why are there 2 swabs inside the tube?',
'MULTIPLE_KITS_DIFFERENT_TIMES': 'I have a 2+ sample kit, and would like to collect and send them in at different times',
'COMBINE_RESULTS_ANS': "We're still evaluating how best to present the data for samples that represent a family. We are mailing individual results now and will provide updated results through the web site later.",
'PASSWORD_SAME_VERIFICATION': 'Is my password the same as my verification code?',
'FECAL_NO_RESULTS': 'I sent in a fecal sample but did not get any results, what happened to them?',
'DIFFERENT_WHATS_WRONG_WITH_ME': "I'm different than other people in my category. Does that mean something is wrong with me?",
'WHY_TWO_SWABS_ANS_2': "<P>Each tube is used for <strong>one sample</strong>. The tube has two swabs in it because one is a backup in case the DNA does not amplify on the first swab.</p>"
"<p>Here's a video of Rob Knight talking about swab handling:</p>"
"<iframe src='%(swab_handling)s' width=''500'' height=''281'' frameborder=''0'' webkitallowfullscreen='' mozallowfullscreen='' allowfullscreen=''></iframe>" % {'swab_handling': media_locale['SWAB_HANDLING']},
'MISSING_METADATA_ANS': 'Metadata are information describing your age, gender, diet, etc. Missing metadata mean that this person did not provide us with this information.',
'WHERE_SEND_SAMPLE': 'Where do I send my sample?',
'LOG_IN_WHAT_NOW': "I'm logged in, what do I do now?",
'LOG_IN_WHAT_NOW_ANS_2': '<p>During this workflow you (or whomever is being sampled) will:</p>'
'<ol> <li>Add a participant</li><li>Provide electronic consent</li><li>Answer survey questions (including the diet questions)</li><li>Upon completion, become eligible to log samples</li> </ol><p>When participants are eligible, you will then see their name under the corresponding menu on the left, in this example we have just added the participant "Test":</p>'
'',
'PROJECT_101': '%(shorthand)s 101' % {"shorthand": AMGUT_CONFIG.project_shorthand},
'WHAT_FORMS_ANS': 'The instruction on the sampling instructions that requires you to "place your forms and the sample tube in preaddressed envelope" is leftover from a previous version of the sampling instructions. There are no forms for you to include inside the envelope with your sample. If you are shipping internationally, please visit the <a href="%(sitebase)s/international_shipping/">International Shipping Instructions</a></p>' % {'sitebase': media_locale['SITEBASE']},
'WHY_TWO_SWABS_ANS_1': 'Each sampling tube contains two swabs and looks like this:',
'MISSING_METADATA': 'What are missing metadata?',
'ONLY_FECAL_RESULTS': 'I sent more than one kind of sample, but I only received data for my fecal sample. What happened to my other samples?',
'NOT_A_BUSINESS_ANS': 'We have had many enquiries about our "service" or "business". %(shorthand)s is a donation-supported academic project that is a collaboration between the <a href="http://www.earthmicrobiome.org">Earth Microbiome Project</a> and the <a href="http://humanfoodproject.com/">Human Food Project</a>, primarily run out of the <a href="https://knightlab.colorado.edu/">Knight Lab</a> at the University of Colorado at Boulder, and is not a business or service. In particular, %(shorthand)s is not a diagnostic test (although the information gained through the project may in future contribute to the development of diagnostic tests). All data except for information that needs to be kept confidential for privacy reasons is openly and freely released into public databases, and the project is not intended to make a profit (any surplus funds would be recycled back into furthering human microbiome research).' % {"shorthand": AMGUT_CONFIG.project_shorthand},
'HOW_PROCESS_SAMPLES_ANS_1': 'The majority of the samples in the %(project)s are run through a processing pipeline designed to amplify a small region of a gene that is believed to be common to all Bacteria and Archaea. This gene, the 16S ribosomal RNA gene is like a barcode you find on your groceries, and serves as a marker for different organisms. There are quite a few different ways to assess the types of Bacteria and Archaea in a sample, including a variety of techniques even to look at this single gene. Every method has its biases, and comparing data between different methods is <a href="http://www.ncbi.nlm.nih.gov/pubmed/23861384">non-trivial</a> and can sometimes be nearly impossible. One of the primary goals of the %(shorthand)s is to provide data that can be used and reused by researchers worldwide, we have opted to use the standard protocols adopted by the <a href="http://earthmicrobiome.org">Earth Microbiome Project</a>, (<a href="http://www.ncbi.nlm.nih.gov/pubmed/22402401">Caporaso et al 2012</a>, and more detailed description of the <a href="http://www.earthmicrobiome.org/emp-standard-protocols/16s/">protocol</a>). This ensures that the data generated by the %(shorthand)s can be combined with the other 80,000 samples so far indexed by the EMP (as scientists, we get giddy about things like this).</p>' % {'shorthand': AMGUT_CONFIG.project_shorthand, 'project': AMGUT_CONFIG.project_name},
'HOW_PROCESS_SAMPLES_ANS_2': 'DNA sequencing is a complex challenge that involves an army of robots, ultra pure water that costs $75 per 10ml, and an amazing <a href="http://www.illumina.com/systems/miseq.ilmn">digital camera</a> that actually determines individual sequences one nucleotide at a time. The number of stunningly brilliant minds whose footprints exist in these methods is astounding. However, the challenges don\'t end once you get the DNA sequence - some might say they are just beginning. It turns out that figuring out what actually is in your sample, that is, what organisms these sequences correspond to, requires cutting edge computational approaches, supercomputers and caffeine for the people operating them. The questions being asked of the data are themselves complex, and volume of data being processed is simply phenomenal. To give you some idea, for each sample sequenced we obtain around 6 million nucleotides which we represent as letters (A, T, G or C, see <a href="http://en.wikipedia.org/wiki/Nucleotide">here</a> for more info), whereas Shakespeare\'s Hamlet only contains around 150,000 letters (ignoring spaces).</p>',
'HOW_PROCESS_SAMPLES_ANS_3': 'The primary software package we use for processing 16S sequence data is called Quantitative Insights into Microbial Ecology (<a href="http://www.qiime.org">QIIME</a>; <a href="http://www.ncbi.nlm.nih.gov/pubmed/20383131">Caporaso et al. 2010</a>). Using this package, we are able to start with raw sequence data and process it to so that we end up be able to explore the relationships within and between samples using a variety of statistical methods and metrics. To help in the process, we leverage a standard and comprehensive (to date) reference database called Greengenes (<a href="http://www.ncbi.nlm.nih.gov/pubmed/22134646">McDonald et al. 2011</a>; <a href="http://www.ncbi.nlm.nih.gov/pubmed/16820507">DeSantis et al. 2006</a>) that includes information on a few hundred thousand Bacteria and Archaea (it is likely that there are millions or more species of bacteria). Due to the molecular limitations of our approach, and the lack of a complete reference database (because the total diversity of microbes on Earth is still unknown), our ability to determine whether a specific organism is present has a margin of error on the order of millions of years, which limits our ability to assess specific strains or even species using this inexpensive technique (more expensive techniques, such as some of the higher-level perks, can provide this information). But all is not lost! By using the evolutionary history of the organisms as inferred by the small pieces of DNA that we have, we can begin to ask broad questions about the diversity within (see <a href="http://www.ncbi.nlm.nih.gov/pubmed/7972354">Faith 1994</a>) and between samples (see <a href="http://www.ncbi.nlm.nih.gov/pubmed/16332807">Lozupone and Knight 2005</a>), and whether the patterns observed relate to study variables (e.g., BMI, exercise frequency, etc).</p>',
'HOW_PROCESS_SAMPLES_ANS_4': 'The specifics on how the %(shorthand)s sequence data are processed can be found <a href="http://nbviewer.ipython.org/github/biocore/American-Gut/blob/master/ipynb/module2_v1.0.ipynb">here</a>, and are written up in an executable <a href="http://ipython.org/notebook">IPython Notebook</a>, which provides all the relevant processing steps in an open-source format. Be warned, processing the full %(shorthand)s dataset takes over 5,000 CPU hours right now (i.e. if you do it on your laptop it might take 7 months, even if you don\'t run out of memory: this might put the time it takes to get your results in perspective). This is the processing pipeline that we use on your data. As this project is a work in progress, we are versioning the processing pipeline as there will continue to be improvements to the process as the project moves forward.</p>' % {'shorthand': AMGUT_CONFIG.project_shorthand},
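# A rough back-of-the-envelope check of the "7 months" figure in
# HOW_PROCESS_SAMPLES_ANS_4 above (an illustrative estimate only, assuming a
# single laptop core and no parallelism): 5,000 CPU hours / 24 hours per day
# ~= 208 days, i.e. roughly 7 months of wall-clock time.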
'HOW_PROCESS_SAMPLES_ANS_5': 'Additional information about the tools used in the %(project)s and our contributions to the microbiome community can be found in the following publications:' % {'project': AMGUT_CONFIG.project_name},
'HOW_PROCESS_SAMPLES_ANS_6': '<ul> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/21552244">Minimum information about a marker gene sequence (MIMARKS) and minimum information about any (x) sequence (MIxS) specifications.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/24280061">EMPeror: a tool for visualizing high-throughput microbial community data.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/16332807">UniFrac: a new phylogenetic method for comparing microbial communities.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/16893466">UniFrac--an online tool for comparing microbial community diversity in a phylogenetic context.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/17220268">Quantitative and qualitative beta diversity measures lead to different insights into factors that structure microbial communities.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/19710709">Fast UniFrac: facilitating high-throughput phylogenetic analyses of microbial communities including analysis of pyrosequencing and PhyloChip data.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/20827291">UniFrac: an effective distance metric for microbial community comparison.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/21885731">Linking long-term dietary patterns with gut microbial enterotypes.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/23326225">A guide to enterotypes across the human body: meta-analysis of microbial community structures in human microbiome datasets.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/22699609">Structure, function and diversity of the healthy human microbiome.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/22699610">A framework for human microbiome research.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/23587224">The Biological Observation Matrix (BIOM) format or: how I learned to stop worrying and love the ome-ome.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/22134646">An improved Greengenes taxonomy with explicit ranks for ecological and evolutionary analyses of bacteria and archaea.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/21304728">The Earth Microbiome Project: Meeting report of the "1 EMP meeting on sample selection and acquisition" at Argonne National Laboratory October 6 2010.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/21304727">Meeting report: the terabase metagenomics workshop and the vision of an Earth microbiome project.</a></li> </ul>',
'HOW_PROCESS_SAMPLES_ANS_7': 'More detail on our work on the effects of storage conditions can be found in these publications:',
'HOW_PROCESS_SAMPLES_ANS_8': '<ul> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/20412303">Effect of storage conditions on the assessment of bacterial community structure in soil and human-associated samples.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/20673359">Sampling and pyrosequencing methods for characterizing bacterial communities in the human gut using 16S sequence tags.</a></li> </ul>',
'HOW_PROCESS_SAMPLES_ANS_9': 'And more detail on our work on sequencing and data analysis protocols can be found in these publications:',
'HOW_PROCESS_SAMPLES_ANS_10': '<ul> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/17881377">Short pyrosequencing reads suffice for accurate microbial community analysis.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/18723574">Accurate taxonomy assignments from 16S rRNA sequences produced by highly parallel pyrosequencers.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/18264105">Error-correcting barcoded primers for pyrosequencing hundreds of samples in multiplex.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/22237546">Selection of primers for optimal taxonomic classification of environmental 16S rRNA gene sequences.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/22170427">Comparison of Illumina paired-end and single-direction sequencing for microbial 16S rRNA gene amplicon surveys.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/21716311">Impact of training sets on classification of high-throughput bacterial 16s rRNA gene surveys.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/21349862">PrimerProspector: de novo design and taxonomic analysis of barcoded polymerase chain reaction primers.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/20383131">QIIME allows analysis of high-throughput community sequencing data.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/22161565">Using QIIME to analyze 16S rRNA gene sequences from microbial communities.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/23861384">Meta-analyses of studies of the human microbiota.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/24060131">Advancing our understanding of the human microbiome using QIIME.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/20534432">Global patterns of 16S rRNA diversity at a depth of millions of sequences per sample.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/22402401">Ultra-high-throughput microbial community analysis on the Illumina HiSeq and MiSeq platforms.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/23202435">Quality-filtering vastly improves diversity estimates from Illumina amplicon sequencing.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/22699611">Human gut microbiome viewed across age and geography.</a></li> </ul>' % {"shorthand": AMGUT_CONFIG.project_shorthand, "project": AMGUT_CONFIG.project_name},
'ANOTHER_COPY_RESULTS_ANS': 'You can download a copy from our website. Log in with your account name and password, go to the left side bar, move your mouse to Human Samples -> PARTICIPANT NAME -> SAMPLE NUMBER, and then click on SAMPLE NUMBER.pdf to download it.',
'FIND_DETAILED_INFO_ANS': 'You can find the raw data from European Bioinformatics Institute (please see <a href="#faq8">here</a>) or download the copy of your result from our website (please see <a href="#faq20">here</a>).',
'WHEN_RESULTS_NON_FECAL': 'I sent in a non-fecal sample and have not received any results, when should I expect results?',
'WHAT_FORMS': 'What are the forms you talk about on the sampling instructions?',
'INTRODUCTION_WHAT_IS_GUT_HEAD': "What is a Gut?",
'INTRODUCTION_WHAT_IS_GUT': "Your gut is a hole that runs through your body. Your gut is actually, developmentally speaking, the outside of your body, but it has evolved many intricacies that make it seem like the inside. Your gut starts with your mouth and ends with your anus. In between, food is changed into energy, feces, bacteria, viruses and a few other things. Your gut exacts a kind of metamorphosis on everything you eat, turning a hotdog or grilled cheese, miraculously, into energy and, ultimately, cells, signals and even thoughts. We are only beginning to understand this process, a process in which microbes play (or fail to play) a major role.",
'INTRODUCTION_WHAT_IS_PROJECT_HEAD': "What is the %(project_name)s?" % {'project_name': AMGUT_CONFIG.project_name},
'INTRODUCTION_WHAT_IS_PROJECT': "<p>The %(project_name)s is a project in which scientists aim to work with non-scientists both to help them (AKA, you) understand the life inside their own guts and to do science. Science is coolest when it informs our daily lives, and what could possibly be more daily than what goes on in your gut? One of the big questions the %(project_shorthand)s scientists hope to figure out is what characterizes healthy and sick guts (or even just healthier and sicker guts) and how one might move from the latter to the former. Such is the sort of big lofty goal these scientists dream about at night (spirochetes rather than sugarplums dancing through their heads), but even the more ordinary goals are exciting. Even just beginning to know how many and which species live in our guts will be exciting, particularly since most of these species have never been studied, which is to say there are almost certainly new species inside you, though until you sample yourself (and all the steps that it takes to look at a sample happen— the robots, the swirling, the head scratching, the organizing of massive datasets), we won't know which ones. Not many people get to go to the rainforest to search for, much less discover, a new kind of monkey, but a new kind of bacteria, well, it is within (your toilet paper's) reach." % {'project_shorthand': AMGUT_CONFIG.project_shorthand, 'project_name': AMGUT_CONFIG.project_name},
'INTRODUCTION_WHAT_IS_16S_HEAD': "What is 16S rRNA?",
'INTRODUCTION_WHAT_IS_16S': "16S rRNA is a sort of telescope through which we see species that would otherwise be invisible. Let me explain. Historically, microbiologists studied bacteria and other microscopic species by figuring out what they ate and growing them, on petri dishes, in labs, in huge piles and stacks. On the basis of this approach— which required great skill and patience— thousands, perhaps hundreds of thousands, of studies were done. But then… in the 1960s, biologists including the wonderful radical <a href=\"http://www.robrdunn.com/2012/12/chapter-8-grafting-the-tree-of-life/\">Carl Woese</a>, began to wonder if the RNA and DNA of microbes could be used to study features of their biology. The work of Woese and others led to the study of the evolutionary biology of microbes but it also eventually led to the realization that most of the microbes around us were not culturable— we didn't know what they ate or what conditions they needed. This situation persists. No one knows how to grow the vast majority of kinds of organisms living on your body and so the only way to even know they are there is to look at their RNA. There are many bits of RNA and DNA that one might look at, but a bit called 16S has proven particularly useful.",
'INTRODUCTION_ROBOTS_HEAD': "Do you really have a robot?",
'INTRODUCTION_ROBOTS': "Look, here is the deal. Robots. Microbiologists use robots. Personally, I think the fact that microbiologists study the dominant fraction of life on Earth, a fraction that carries out most of the important processes (and a fair bit of inexplicable magic) makes microbiologists cool. I am not a microbiologist; I am an evolutionary biologist and a writer, but I think that microbiologists are hipsters cloaked in scientists' clothing (and language). But if the outrageousness of their quarry does not convince you they are hip, well, then, let me remind you, they have robots.<br/> <br/>The robots enable the scientists to rapidly extract the DNA and RNA from thousands of samples simultaneously. Specifically, they can load your samples into small plastic plates each with 96 little wells. The robot then loads chemicals into the wells and heats the chemically-laced wells enough to break open the bacterial cells in the sample, BAM! This releases the cell's DNA and RNA. The robots then decode the specific letters (nucleotides) of the 16S gene using the nucleotides dumped out of the broken microbial cells into these plates.",
'INTRODUCTION_TREE_HEAD': "Tree of life",
'INTRODUCTION_TREE': "There is an evolutionary tree inside you. Well, sort of. When scientists study the microbes in your gut (and from the same samples we could also study viruses, bacteriophages— the viruses that attack bacteria—, fungi or even the presence of animals such as worms of various sorts) they do so by looking at the 16s or other genetic code of the RNA on the swabs you send us. We compare the code of each bit of RNA we find to the code of species other people have collected and also the code of the other bits of RNA in your sample. As a result, we can actually use the results of your sample to map the species living in you onto an evolutionary tree. Your own genes occupy one tiny branch on the tree of life, but the species inside of you come from all over the evolutionary tree. In fact, in some people we find species from each of the major branches of the tree of life (archaea, bacteria, eukaryotes) and then also many of the smaller branches. Inside you, in other words, are the consequences of many different and ancient evolutionary stories.",
'INTRODUCTION_MICROBIOME_HEAD': "What is a microbiome?",
'INTRODUCTION_MICROBIOME': "A biome, as ecologists and evolutionary biologists like me have historically used the term, is a self-contained ecosystem where all the organisms can interact with each other and the environment in which they live. A rain forest, for example, is a biome, but it is made of smaller biomes: a tree is a biome for insects, and a single insect is a biome for bacteria. These smaller biomes are often called microbiomes; in your case, it's your gut!… A microbiome is a small (micro) version of this larger phenomenon, a whole world within you.",
'INTRODUCTION_EAT_HEAD': "What do my microbes eat?",
'INTRODUCTION_EAT': "Everyplace you have ever set your hand or any other part of your body is covered in microbes. This is true of your gut, but also everything else. Microbes live in clouds. They live in ice. They live deep in the Earth. They also live in your colon, on your skin, and so on. It is reasonable to wonder what they eat. The short answer is everything. Microbes are thousands of times more variable when it comes to their diets than are animals, plants or even fungi. Some microbes can get their nitrogen out of the air; they can, in other words, eat air. Ain't that cool. Others, like us, eat other species, munching them in the world's coolest and most ubiquitous game of Pac-Man. The bacteria in your gut are also diverse in terms of their diets. If there are two hundred species of bacteria in your gut (and there probably are at least that many) then there are at least that many different combinations of things that they are eating.",
'INTRODUCTION_MICROBES_COME_FROM_HEAD': "Where do my microbes come from?",
'INTRODUCTION_MICROBES_COME_FROM': "If you had asked this question a few years ago, we would have had to say the stork. But increasingly we are beginning to understand more about where the specific zoo of species in you come from and it is a little grosser than the stork. If you were born vaginally, some of your gut microbes came from your mother's feces (birth, my friend, is messy). Some came from her vagina. Others came, if you were breast fed, from her milk. It is easiest for bacteria, it seems, to colonize our guts in the first moments of life. As we age, our stomachs become acidic. We tend to think of the acid of our stomachs as aiding in digestion; it does that but another key role of our stomachs is to prevent pathogenic species from getting into our guts. The trouble with this, well, there are a couple of problems. One is c-section birth. During c-section birth, microbes need to come from somewhere other than the mother's vagina and feces. The most readily available microbes tend to be those in the hospital. As a result, the microbes in c-section babies tend to, at least initially, resemble those of the hospital more than they resemble those of other babies. With time, many c-section babies eventually get colonized by enough good bacteria (from pet dogs, pet cats, their parents' dirty hands, etc..) to get good microbes, but it is a more chancy process. But then, the big question, one we just don't know the answer to, is which and how many microbes colonize our guts as we get older. How many microbes ride through the acid bath of our stomach on our food and take up residence? We know that bad bacteria, pathogens, do this, but just how often and how good ones do it is not well worked out. You might be thinking, what about yoghurt and I'll tell you the answer, definitely, is we don't really know. Do people who eat yoghurt have guts colonized by species from that yoghurt? Maybe, possibly, I bet they do, but we don't really know (though if we get enough samples from yoghurt and non yoghurt eaters, we could know).",
'INTRODUCTION_DISCOVER_HEAD': "What will we discover in your gut?",
'INTRODUCTION_DISCOVER': "When the early meetings were going on about this project, everyone sat around talking about what we might see from colon samples. One scientist was sure that we would see bacteria that looked like Elvis. Another thought we would find Shakespeare's great lost play. But the truth is all that we are going to see from your gut are lists of nucleotides. Let me explain…<br/> <br/>Nucleotides are those small chemical building blocks out of which DNA and RNA are made. They come in different forms to which scientists have assigned names and letters. When the robots are done with the work, what they produce are lists of the nucleotides in all of the 16S genes from all of the cells in your sample. These nucleotides tell the scientists which kinds of life are in your sample (and in you). But because we will only have samples of little stretches of the 16S genes, we won't know exactly which species are in you, just which lineages they are from. You might have the bacterial equivalent of a chimpanzee and a gorilla in you, but all we'll know from your sample is that there was an ape. Knowing you have a bacterial ape in your gut will, on its own, not tell you so much. The real information will come from context, statistical context. I know, that sounds boring, but I promise it is not.<br/> <br/>We think that hundreds of different things you do during your life, in addition to what your mother and father did (let's try not to think about that), your genes and even just where you grew up influence which species of microbes are found inside you. But we don't really know. The problem is humans are so darn complicated. What we need to be able to do is to compare large numbers of people, people who differ in many ways, to be able to sort out which variables are sometimes a little important and which ones are the big deal. Is a vegan gut very different from a vegetarian one? Does eating yoghurt make a big difference? Do the effects of a c-section birth last forever? These questions require us to compare many people, which is where you come in. Your sample gives us context and it gives you context too. It won't be terribly exciting on its own (you will know which ancient lineages you have dividing and thriving inside you. OK, that is pretty cool on second thought), but it will be very exciting in context. Where do you fall relative to fish eaters, sick people, healthy people, hunter gatherers, or even your dog? You will know and we will know. And this is not all.<br/> <br/>All of the questions I have mentioned so far are what I might call first order questions. How does this thing compare to that thing? But what we'd love to be able to answer are second order questions, contingent questions, questions such as whether the effect of your diet depends on your ethnicity (it probably does), whether the effect of having a dog depends on whether or not you live in the city (again, I bet it does) and so on. These questions are exactly the sort of question we have failed to be able to answer well when it comes to diet, because we don't have big enough sample sizes. We can see the forest for all of humans. Well, that isn't quite right, but you get the idea, we will be able to understand elaborate effects of multiple variables on the wilderness between your pie hole and the other hole and that, to us, is exciting.",
'INTRODUCTION_STORIES_HEAD': "A few of the stories of the evolutionary tree in your gut",
'INTRODUCTION_STORIES': "Some people have least favorite bacteria. Salmonella, for example, seems to have inspired some haters. But microbiologists also have favorite bacteria, as well they should. The stories of bacteria (and those who chase and study them) are among the most important of humanity's stories and include the tales of many species without which we could not live, or whose presence or absence affects how we live. These species are as fascinating and, dare I say, lovely as pandas or koala bears, just harder to see and far more significant. I have begun to compile a book of the stories of some of the most common and interesting species you are likely to encounter— whether in your own gut, on your lettuce or the next time you sink your fingers into the soil. These stories will be available online here at <a href=\"http://invisiblelife.yourwildlife.org/\">Invisible Life</a> as they are compiled as a book, a book written by some of the very best science writers AND scientists out there. For starters, you might be interested to know that <a href=\"http://invisiblelife.yourwildlife.org/mycoplasma/\">the smallest species on Earth</a> is sometimes found inside humans and, once we look at your 16S, we will even know whether it lives in you. As more of these stories are written, they will appear here, eventually as an ebook, an ebook that you can reference when you find out what lives inside you to know whether your constant companion is a species we know everything about or, as is more typical, no one has ever studied. Like Charlie Chaplin once said… Wait, Charlie Chaplin was the one who didn't say anything, wasn't he.",
'ANOTHER_COPY_RESULTS': 'Can I get another copy of my results?',
'NOT_A_BUSINESS': 'We are not a business',
'WHERE_SEND_SAMPLE_ANS': '<p>This is the shipping address:</p>'
'%(address)s<p>If you are shipping internationally, please see the <a href="%(sitebase)s/international_shipping/">international shipping instructions</a>.' % {'sitebase': media_locale['SITEBASE'], 'address': media_locale['SHIPPING_ADDRESS']}
}
_TAXA_SUMMARY = {'RESOLUTION_NOTE': "Note: Where there are blanks in the table below, the taxonomy could not be resolved in finer detail.",
'PERCENTAGES_NOTE': "Note: The percentages listed represent the relative abundance of each taxon. This summary is based on normalized data. Because of limitations in the way the samples are processed, we cannot reliably obtain species-level resolution. As such, the data shown are collapsed at the genus level.",
'DOWNLOAD_LINK': "Download the table"}
_HELP_REQUEST = {
'CONTACT_HEADER': "Contact the %(shorthand)s" % {"shorthand": AMGUT_CONFIG.project_shorthand},
'RESPONSE_TIMING': "We will send a response to the email address you supply within 24 hours.",
'FIRST_NAME': "First name",
'LAST_NAME': "Last name",
'EMAIL_ADDRESS': "Email address",
'PROBLEM_PROMPT': "Enter information related to your problem"
}
_DB_ERROR = {
'HEADER': 'Oops! There seems to be a database error.',
'MESSAGE': 'Please help us to debug by emailing us at <a href="mailto:%(help_email)s">%(help_email)s</a> and telling us exactly what happened before you got this error.' % {"help_email": media_locale["HELP_EMAIL"]},
'SIGNOFF': 'Thanks, <br /> The American Gut Team'
}
_404 = {
'MAIN_WARNING': '404: Page not found!',
'HELP_TEXT': 'Click <a href="mailto:%(help_email)s">HERE</a> to email us about the issue. Please include the URL you were trying to access:' % {'help_email': media_locale['HELP_EMAIL']}
}
_PARTICIPANT_OVERVIEW = {
'COMPLETED_CONSENT': 'Completed consent',
'COMPLETED_SURVEY': 'Completed survey',
'SAMPLES_ASSIGNED': 'Samples assigned',
'OVERVIEW_FOR_PARTICPANT': 'Overview for participant'
}
_ADD_SAMPLE_OVERIVIEW = {
'ADD_SAMPLE_TITLE': 'Choose your sample source ',
'ADD_SAMPLE_TITLE_HELP': 'The sample source is the person, animal or environment that the sample you are currently logging came from. If you took the sample from yourself, you should select yourself as the sample source.',
'ENVIRONMENTAL': 'Environmental',
'ADD_SAMPLE_1': 'If you don\'t see the sample source you want here, you need to add it. You can do this in ',
'ADD_SAMPLE_2': 'Step 2',
'ADD_SAMPLE_3': ' on the main page when you log in.',
}
_SAMPLE_OVERVIEW = {
'BARCODE_RECEIVED': 'Sample %(barcode)s. This sample has been received by the sequencing center!',
'DISPLAY_BARCODE': 'Sample %(barcode)s',
'RESULTS_PDF_LINK': 'Click this link to visualize sample %(barcode)s in the context of other microbiomes!',
'SAMPLE_NOT_PROCESSED': 'This sample has not yet been processed. Please check back later.',
'DATA_VIS_TITLE': 'Data Visualization',
'TAXA_SUM_TITLE': 'Taxa Summary',
'VIEW_TAXA_SUMMARY': 'View Taxa Summary',
'SAMPLE_STATUS': 'Sample Status',
'SAMPLE_SITE': 'Sample Site',
'SAMPLE_DATE': 'Sample Date',
'SAMPLE_TIME': 'Sample Time',
'SAMPLE_NOTES': 'Notes',
'REMOVE_BARCODE': 'Remove barcode %(barcode)s'
}
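# A minimal, hypothetical sketch (not referenced by the application) of how the
# deferred placeholders in this module -- e.g. 'BARCODE_RECEIVED' and
# 'REMOVE_BARCODE' in _SAMPLE_OVERVIEW above -- are expected to be filled in at
# render time, using the same old-style %-interpolation that the eagerly
# formatted entries (those ending in "% {...}") already use. The function name
# and the example barcode are illustrative only, not part of the real API.
def _example_render_barcode_received(barcode):
    # e.g. _example_render_barcode_received('000001234') returns
    # 'Sample 000001234. This sample has been received by the sequencing center!'
    return _SAMPLE_OVERVIEW['BARCODE_RECEIVED'] % {'barcode': barcode}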
_NEW_PARTICIPANT_OVERVIEW = {
'ADD_NEW': 'Add a New Human Sample Source',
'EXPLANATION': 'You have entered the add human source workflow. During this workflow you will add a human source that represents whoever is being sampled. You will be asked for consent to join the project and then asked survey questions.',
'ONCE_ADDED': 'Once you have added a human source, you will then see the name of that source in the left menu, and you will also have an option for adding a sample to that source. When you click that, you will be able to select the appropriate barcode and add sample metadata.',
'CONTINUE': 'Continue'
}
_INTERNATIONAL = {
'PAGE_TITLE': '%(shorthand)s International Shipping Instructions' % {'shorthand': AMGUT_CONFIG.project_shorthand},
'INT_PARTICIPANTS': 'International Participants:',
'INSTRUCTIONS_1': 'In order to comply with amended federal and IATA regulations, we are requesting that international participants return their sample tubes through FedEx International and follow the additional safety requirements for shipping human swab samples to the United States. Your airway bill must clearly identify the package as containing "human exempt specimens". The samples will additionally need to be packaged within secondary containment to ensure that they can safely enter the United States.',
'INSTRUCTIONS_2': 'For shipment, you will need to use regular tape to seal the plastic tube that contains the swab, then place the swab in the provided brown mailing envelope and place the brown envelope inside a Tyvek/plastic mailer, <strong>which can be acquired free of charge from FedEx</strong>, prior to FedEx shipment.',
'INSTRUCTIONS_3': 'If you do not follow these directions the sample will be destroyed by United States Customs at the port of entry into the United States.',
'YOUR_SAMPLES': 'Your samples',
'YOUR_SAMPLES_LIST': '<li>Are considered dried specimens</li><li>Must be shipped via FedEx</li><li>Must have tape sealing the plastic tube that contains the swab</li><li>Must be placed in a buff mailing envelope with the buff envelope placed inside a Tyvek/plastic mailer prior to FedEx shipment</li><li>Must be shipped with an airway bill and must be labeled with the complete address of the sender and the complete address of the recipient, and with the words "Human exempt sample(s)"</li>'
}
_NEW_PARTICIPANT = {
'ADD_HUMAN_TITLE': 'Add a New Human Source',
'ADD_HUMAN_HELP_SUGGESTION': 'If you need help with the website, please use the contact mechanism in the menu to the left. Please do not email the people listed in this form for help, unless it has to do with an injury. ',
'CONSENT_TITLE': 'CONSENT TO PARTICIPATE IN A RESEARCH STUDY',
'TEXT_TITLE': 'Study Title: American Gut Project',
'TEXT_PI': 'Principal Investigator: Rob Knight',
'TEXT_PERSONNEL_TITLE': 'Key Personnel:',
'TEXT_NAME': 'Name',
'TEXT_ROLE': 'Role',
'TEXT_DEPARTMENT': 'Department',
'TEXT_PHONE': 'Phone Number',
'TEXT_EMAIL': 'E-mail',
'TEXT_NAME_1': 'Rob Knight',
'TEXT_ROLE_1': 'Principal Investigator',
'TEXT_DEPARTMENT_1': 'Biofrontiers Institute/HHMI',
'TEXT_PHONE_1': '303-492-1984',
'TEXT_EMAIL_1': '[email protected]',
'TEXT_NAME_2': 'Gail Ackermann',
'TEXT_ROLE_2': 'Co-I',
'TEXT_DEPARTMENT_2': 'Biofrontiers Institute',
'TEXT_PHONE_2': '303-492-7506',
'TEXT_EMAIL_2': '[email protected]',
'TEXT_PARTICIPATION_TITLE': 'Your participation in this research study is voluntary.',
'TEXT_PARTICIPATION_DESCRIPTION': ' Please think about the information below carefully. Feel free to ask questions before making your decision whether or not to participate. If you decide to participate, you will be asked to sign this form electronically and will receive a copy of the form by email at the address you supplied when you signed up for the study.',
'TEXT_BACKGROUND_TITLE': 'Purpose and Background',
'TEXT_BACKGROUND_DESCRIPTION_1': 'Trillions of microorganisms live on and within the human body, which is colonized at birth and continuously inhabited throughout a person\'s lifetime. Our resident microbes occupy many body habitats, including the skin and mucosal surfaces, and the gastrointestinal tract. Our microbial symbionts are largely harmless or beneficial; for example, we rely on our gut microbiota to aid in nutrition, resist pathogens, and educate our immune system. We would like to survey a large number of volunteers from the US and other countries including all body types, all dietary preferences and both healthy and unhealthy people to more clearly define the range of these microbial communities. We are interested in learning whether people with similar age, diet, environment, family, pets, body weight, or other features, also have similar microorganisms.',
'TEXT_BACKGROUND_DESCRIPTION_2': 'We anticipate that the entire study will be completed in 5 years. When the results are available for your samples we will provide you with an easy to understand summary of the microbial communities of your body and a summary of the combined results of other participants for comparison. We anticipate that the results for your samples will be available within 3-6 months of receipt in the laboratory. These results will be sent to you by email. ',
'TEXT_STUDY_TASK_TITLE': 'Study Tasks and Procedures',
'TEXT_STUDY_TASK_DESCRIPTION': 'If you agree to participate in this study you will be asked for your name, to donate samples, to complete a questionnaire and to provide updated personal information. We will send you a sample kit that includes the swabs (which are individually wrapped in plastic and include a plastic sleeve for returning the sample to us), coded labels for the sample tubes, and a pre-addressed envelope with instructions on how to safely return the samples to us for analysis.',
'TEXT_STUDY_TASK_DESCRIPTION_LIST_TITLE': 'Samples may include',
'TEXT_STUDY_TASK_DESCRIPTION_LIST_1': 'Stool (fecal) by collecting a smear from used bathroom tissue using a sterile polyester-tip swab',
'TEXT_STUDY_TASK_DESCRIPTION_LIST_2': 'Saliva using a sterile polyester-tip swab inserted into the mouth and sampling the surface of the tongue or inside of your cheek',
'TEXT_STUDY_TASK_DESCRIPTION_LIST_3': 'Skin using a sterile polyester-tip swab',
'TEXT_STUDY_TASK_DESCRIPTION_LIST_4': 'Nostril mucus by inserting a sterile polyester-tip swab gently into a nostril',
'TEXT_STUDY_TASK_DESCRIPTION_LIST_5': 'Vaginal mucus by inserting a sterile polyester-tip swab into the introitus of the vagina',
'TEXT_STUDY_TASK_DESCRIPTION_LIST_6': 'Ear wax by inserting a sterile polyester-tip swab gently into the ear',
'TEXT_STUDY_TASK_DESCRIPTION_LIST_7': 'Tears by inserting a sterile polyester-tip swab gently along the inner corner of the eyelid (avoiding contact with the eye).',
'TEXT_STUDY_TASK_DESCRIPTION_ADDITIONAL_1': 'We are requesting that you contribute funding to the project at a level that is commensurate with the number of swabs and the kinds of tests you are requesting. The basic package covers a single fecal swab.',
'TEXT_STUDY_TASK_DESCRIPTION_ADDITIONAL_2': 'After you submit your first set of samples, we may ask you to donate additional samples (up to 7 times) if you belong to a group that has specific diet, disease or age considerations. You will be contacted by email if we would like to repeat the sampling. Some participants will be asked to provide a detailed food diary that includes a list of everything you eat and drink every day for up to 6 months. If this is required we will contact you by email to confirm that you are willing to contribute this information.',
'TEXT_INTERNATIONAL_PARTICIPANT_TITLE': 'International Participants',
'TEXT_INTERNATIONAL_PARTICIPANT_DESCRIPTION_1': 'If you are an international participant from the United Kingdom or Australia, sample aggregation sites have been established at King\'s College, London, UK and the University of Queensland, Brisbane, Australia. Instructions for making use of these sites are available on the web site (http://www.americangut.org)',
'TEXT_INTERNATIONAL_PARTICIPANT_DESCRIPTION_2': 'For other international participants, to comply with amended federal regulations and IATA regulations, we are requesting that you return your sample tubes through FedEx International and follow additional requirements for safely shipping human swab samples. You will need to label the airway bill clearly for shipment, identifying the samples as "human exempt specimens". The samples should be packaged with secondary containment to ensure that they can be safely returned. For this you will use tape to seal the plastic tube that contains the swab, wrap the sample tube in absorbent tissue, then place the swab in a mailing envelope inside the Tyvek/plastic mailer prior to FedEx shipment. If you do not follow these directions the sample may be intercepted at the port of entry into the USA and destroyed.',
'TEXT_SURVEY_DESCRIPTION_TITLE': 'Description of Surveys/Questionnaires/Interview Questions',
'TEXT_SURVEY_DESCRIPTION_DESCRIPTION_1': 'You will be asked questions about your general personal information (age, sex, height, weight, ethnicity, place of birth, current ZIP code). We will ask if you recently moved and where you moved from. We will ask questions about general diet information (including whether you follow a special diet, if you have food allergies, whether you have cultural or religious food restrictions). Other questions address whether you have pets and the type of contact you have with these pets and your relationship to other people in this study. There is a section on health information including a history of allergies/asthma, if you suffer from migraines and if you have a history of irritable bowel disease. The questionnaire also asks you to complete a food log to assess the amount of protein, fat, carbohydrate, grains and vegetables in your diet. For this we suggest that you consult a free website that will allow you to estimate these amounts.',
'TEXT_SURVEY_DESCRIPTION_DESCRIPTION_2': 'Participants will be presented with the option of completing a Food Frequency Questionnaire (FFQ) at a third party site (www.vioscreen.com).',
'TEXT_SURVEY_DESCRIPTION_DESCRIPTION_3': 'Some participants may be asked to keep a detailed food diary for up to 6 months listing all the foods they eat and drink in a day.',
'TEXT_DURATION_TITLE': 'Duration',
'TEXT_DURATION_DESCRIPTION': 'We anticipate that participant time commitment for sampling will be less than 15 minutes; to complete the questionnaire online will take no more than 45 minutes; and completing the food diary should take no more than 10 minutes/day. If you choose to complete the FFQ at VioScreen, this will take an additional 40 minutes. The study will be conducted over a maximum period of 5 years to include all the people we are requesting permission to sample. We anticipate that results will be available within 3-6 months of sample receipt.',
'TEXT_WITHDRAWAL_TITLE': 'Study Withdrawal',
'TEXT_WITHDRAWAL_DESCRIPTION_1': 'Taking part in this study is completely <strong>voluntary</strong>. You do not have to participate if you don\'t want to. You may also leave the study at any time. If you leave the study before it is finished, there will be no penalty to you, and you will not lose any benefits to which you are otherwise entitled.',
'TEXT_WITHDRAWAL_DESCRIPTION_2': 'To withdraw from the study send email to the American Gut Project ([email protected]) using the email address you used to contact us about the study and include your access code so that we can delete your records.',
'TEXT_RISKS_TITLE': 'Risks and Discomforts',
'TEXT_RISKS_DESCRIPTION_1': 'There are no foreseeable risks for participating in this study. You should be aware that the samples you submit are not anonymous but we will make every effort to ensure that they remain confidential. The only staff associated with the study that will have access to confidential information (your name and address) will be those responsible for shipping the sample kit and questionnaire to you. When the samples are returned they will have an associated code but no personally identifiable information.',
'TEXT_BENEFITS_TITLE': 'Benefits',
'TEXT_BENEFITS_DESCRIPTION': 'You may not receive any direct benefit from taking part in this study other than you or your child\'s intrinsic interest in the scientific outcome.',
'TEXT_CONFIDENTIALITY_TITLE': 'Confidentiality',
'TEXT_CONFIDENTIALITY_DESCRIPTION_1': 'We will make every effort to maintain the privacy of your or your child\'s data. All data will be transferred electronically to a secure database stored on a server (The beast) in a CU card access-controlled server room. There is no public accessibility to this server without VPN and a password. The code key will be stored in a single location on a password-protected database on a server in a CU-access card controlled server room. The code will be destroyed by deletion from the server at the end of the study. All other data including all electronic data will be de-identified (coded).',
'TEXT_CONFIDENTIALITY_LIST_TITLE': 'These are some reasons that we may need to share the information you give us with others:',
'TEXT_CONFIDENTIALITY_LIST_1': 'If it is required by law.',
'TEXT_CONFIDENTIALITY_LIST_2': 'If we think you or someone else could be harmed.',
'TEXT_CONFIDENTIALITY_LIST_3': 'Sponsors, government agencies or research staff sometimes look at forms like this and other study records. They do this to make sure the research is done safely and legally. Organizations that may look at study records include:',
'TEXT_CONFIDENTIALITY_LIST_4': 'Office for Human Research Protections or other federal, state, or international regulatory agencies',
'TEXT_CONFIDENTIALITY_LIST_5': 'The University of Colorado Boulder Institutional Review Board',
'TEXT_CONFIDENTIALITY_LIST_6': 'The sponsor or agency supporting the study: Howard Hughes Medical Institute and the American Gut Project.',
'TEXT_COMPENSATION_TITLE': 'Compensation',
'TEXT_COMPENSATION_DESCRIPTION': 'You will not receive any compensation for participating in this research.',
'TEXT_RIGHTS_TITLE': 'Participant Rights',
'TEXT_RIGHTS_DESCRIPTION': 'Taking part in this study is your choice. You may choose either to take part or not take part in the study. If you decide to take part in this study, you may leave the study at any time. No matter what decision you make, there will be no penalty to you in any way. You will not lose any of your regular benefits. We will tell you if we learn any new information that could change your mind about being in this research study. For example, we will tell you about information that could affect your health or well-being.',
'TEXT_INJURIES_TITLE': 'If You are Injured',
'TEXT_INJURIES_DESCRIPTION': 'Please call Rob Knight at 303-492-1984 or email <a href="mailto:[email protected]">Rob Knight</a>.',
'TEXT_QUESTIONS_TITLE': 'Contacts and Questions',
'TEXT_QUESTIONS_DESCRIPTION_1': 'For questions, concerns, or complaints about this study, please call Rob Knight at 303-492-1984 or email <a href="mailto:[email protected]">Rob Knight</a>.',
'TEXT_QUESTIONS_DESCRIPTION_2': 'If you are injured as a result of participating in this study or for questions about a study-related injury, please call Rob Knight at 303-492-1984 or email <a href="mailto:[email protected]">Rob Knight</a>.',
'TEXT_QUESTIONS_DESCRIPTION_3': 'If you have questions about your rights as a research study participant, you can call the Institutional Review Board (IRB). The IRB is independent from the research team. You can contact the IRB if you have concerns or complaints that you do not want to talk to the study team about. The IRB phone number is (303) 735-3702.',
'TEXT_I_HAVE_READ_1': 'I have read (or someone has read to me) this form. I am aware that I am being asked to be in a research study. I have had a chance to ask all the questions I have at this time. I have had my questions answered in a way that is clear. I voluntarily agree to be in this study.',
'TEXT_I_HAVE_READ_2': 'I am not giving up any legal rights by signing this form. I will be sent a copy of this form to the email address I used to sign up for the study.',
'PARTICIPANT_NAME': 'Name of participant',
'PARTICIPANT_EMAIL': 'Email of participant',
'PARTICIPANT_IS_YOUNG': 'Participant is older than 3 months and younger than 18 years of age',
'PARTICIPANT_PARENT_1': 'Name of parent/guardian 1',
'PARTICIPANT_PARENT_2': 'Name of parent/guardian 2',
'PARTICIPANT_DECEASED_PARENTS': 'One or both parents are deceased or unable to consent.'
}
_MAP = {
'MAP_TITLE': 'Map Key',
'MAP_PARTICIPANT': ' Participant',
'MAP_KIT': ' Kit Verified',
'MAP_SAMPLE': ' Sample(s) Logged',
}
_FORGOT_PASSWORD = {'ENTER_ID_EMAIL': 'Enter your Kit ID and email',
'KIT_ID': 'Kit ID:',
'EMAIL': 'E-mail',
'EMAIL_RESET_PASSWORD': 'You will receive an email shortly with instructions to reset your password. Please check your email because you need to reset your password within two hours.',
'EMAIL_FAILED': '<p>There was a problem sending you the password reset code. Please contact us directly at <a href=\"mailto:%(help_email)s\" target=\"_blank\">%(help_email)s</a>.</p><p>Email contained: </p>' % {'help_email': media_locale['HELP_EMAIL']},
'NO_RECORD': '<p style="color:red;">This information does not match our records</p><p>Please email us <a href="mailto:%(help_email)s">directly</a> for further assistance</p>' % {'help_email': media_locale['HELP_EMAIL']},
'SEND_EMAIL': 'Send email'}
_ERROR = {
'ERROR_OCCURED': 'AN ERROR HAS OCCURRED!',
'ERROR_CONTACT': "The error has been logged and we will look into it. Please go back to the main page."
}
_RETREIVE_KITID = {
'UNKNOWN_EMAIL': 'This email address is not in our system',
'ENTER_EMAIL': 'Please Enter Your Email',
'SEND_EMAIL': 'Send Kit ID Email',
'EMAIL_SUCCESS': 'Your kit ID has been emailed to you. Please check your email.',
'EMAIL_CANTSEND': 'Mail can be sent only from the microbio.me domain.',
'EMAIL_EXCEPTION': 'There was a problem sending you the kit ID. Please contact us directly at <a href=\"mailto:%(help_email)s\">%(help_email)s</a>.' % {'help_email': media_locale['HELP_EMAIL']},
'EMAIL_PROMPT': 'Email:'
}
_ADD_SAMPLE = {
'NEW_SAMPLE_TITLE': 'Log a new sample for',
'NEW_SAMPLE_DESCRIPTION_1': 'Choose the barcode from your kit that corresponds to the sample you are logging.',
'NEW_SAMPLE_DESCRIPTION_2': 'It is very important that the sample barcode matches <strong>exactly</strong> for downstream analysis steps.',
'SITE_SAMPLED': 'Site Sampled',
'DATE': 'Date',
'DATE_EXAMPLE': ' mm/dd/yyyy (Example: 05/07/2013)',
'TIME': 'Time',
'TIME_EXAMPLE': ' hh:mm AM/PM (Example: 04:35 PM)',
'NOTES': 'Additional Notes (optional)',
}
_REGISTER_USER = {
'ENTER_NAME': 'Please enter your name',
'ENTER_EMAIL': 'Please enter your email',
'REQUIRED_EMAIL': 'You must supply a valid email',
'ENTER_ADDRESS': 'Please enter your address',
'ENTER_CITY': 'Please enter your city',
'ENTER_STATE': 'Please enter your state',
'ENTER_ZIP': 'Please enter your zip',
'ENTER_COUNTRY': 'Please enter your country',
'REQUIRED_ZIP': 'Your zip must consist of at least 5 characters',
'EMAIL': 'Email',
'NAME': 'Name',
'ADDRESS': 'Address',
'CITY': 'City',
'STATE': 'State',
'ZIP': 'Zip',
'COUNTRY': 'Country',
'SUBMIT': 'Submit My Information'
}
_ADDENDUM = {
'TITLE': 'American Gut Addendum',
'INTRO': 'We\'d like to note that in general these data allow you to understand how similar or different you are to other people in terms of the bacterial composition of the sample you sent. The information about the microbes is given at as fine a level of taxonomic resolution as we were able to achieve with our sequencing methods, and this varies for different groups of microbes. Currently, we cannot tell you what it means if you have more or less of a certain bacterium than other people. Gut microbiome research is still new, and we have a lot to learn. Your participation in the American Gut Project will allow us to learn more, and we hope to update you with new findings as they emerge.',
'LEARN_MORE': 'Learn more about your certificate by clicking on a plot or table',
'MOD01ALT': 'Your American Gut Sample',
'MOD01bALT': 'Michael Pollan',
'MOD02ALT': 'What\'s in your sample?',
'MOD11ALT': 'Taxonomy Bar Charts',
'MOD12ALT': 'Major Phyla',
'MOD13ALT': 'Abundant Microbes',
'MOD14ALT': 'Enriched Microbes',
'MOD15ALT': 'Rare Microbes',
'MOD03ALT': 'How do your gut microbes compare to others?',
'MOD08ALT': 'PCoA of BodySites with HMP',
'MOD09ALT': 'PCoA of diets and age',
'MOD10ALT': 'PCoA of American Gut Data',
'RESULTS_CAPTION': 'Your certificate is designed to help you determine what was found in your sample, and how you compare to other people. Click on a graph or table to learn more.',
'SAMPLE_TITLE': 'What\'s in your %(PROJECT_TITLE)s sample?' % media_locale,
'TAXONOMY': 'Taxonomy',
'TAXONOMY_INTRO': 'Taxonomy is a system scientists use to describe all life on the planet. Taxonomy is commonly referred to as an organism\'s scientific name. This name allows us to understand how closely related two organisms are to each other. There are seven major levels of taxonomy that go from less specific to more specific. The phylum level represents a very broad range of organisms that have <strong>evolved over hundreds of millions of years</strong> whereas the species level represents only a small subset of them that are <strong>much more closely related</strong>. Typically, names at the genus and species levels are written in <em>italics</em> or are <u>underlined</u> (in our tables, they are <em>italicized</em>). For instance, here is the list of taxonomic levels and names for humans and chimpanzees:',
'HUMAN_TAXONOMY': 'Human',
'HUMAN_TAXONOMY_KINGDOM': 'Kingdom: Animalia',
'HUMAN_TAXONOMY_PHYLUM': 'Phylum: Chordata',
'HUMAN_TAXONOMY_CLASS': 'Class: Mammalia',
'HUMAN_TAXONOMY_ORDER': 'Order: Primates',
'HUMAN_TAXONOMY_FAMILY': 'Family: Hominidae',
'HUMAN_TAXONOMY_GENUS': 'Genus: <em>Homo</em>',
'HUMAN_TAXONOMY_SPECIES': 'Species: <em>sapiens</em>',
'CHIMP_TAXONOMY': 'Chimpanzee',
'CHIMP_TAXONOMY_KINGDOM': 'Kingdom: Animalia',
'CHIMP_TAXONOMY_PHYLUM': 'Phylum: Chordata',
'CHIMP_TAXONOMY_CLASS': 'Class: Mammalia',
'CHIMP_TAXONOMY_ORDER': 'Order: Primates',
'CHIMP_TAXONOMY_FAMILY': 'Family: Hominidae',
'CHIMP_TAXONOMY_GENUS': 'Genus: <em>Pan</em>',
'CHIMP_TAXONOMY_SPECIES': 'Species: <em>troglodytes</em>',
'LACTO_TAXONOMY': 'Here is the same list for a common yogurt bacterium (<em>Lactobacillus delbrueckii</em>):',
'LACTO_TAXONOMY_KINGDOM': 'Bacteria',
'LACTO_TAXONOMY_PHYLUM': 'Firmicutes',
'LACTO_TAXONOMY_CLASS': 'Bacilli',
'LACTO_TAXONOMY_ORDER': 'Lactobacillales',
'LACTO_TAXONOMY_FAMILY': 'Lactobacillaceae',
'LACTO_TAXONOMY_GENUS': '<em>Lactobacillus</em>',
'LACTO_TAXONOMY_SPECIES': '<em>delbrueckii</em>',
'BACTAX_LINK': 'For more information on bacterial taxonomy, please refer to the following link: ',
'TOP': 'Back to the top',
'TAX_BARCHART': 'Taxonomy Bar Chart',
'TAX_BARCHART_TEXT_1': 'The taxonomy bar chart shows the abundances of bacterial types at the phylum level in your sample and compares it to other samples. Specifically, it shows you what percentage of all your bacteria belonged to each phylum. We also calculated the average percentage of each bacterial phylum across all samples, across samples from people with a similar diet to the one you reported, across samples from people of the same gender as you, across samples from everyone with a similar BMI to you, across samples from everyone with the same age as you, and for one specific person, Michael Pollan. You can compare the percentage of bacterial phyla in your sample (first bar) to all of these values to get an idea of how similar or different you are.',
'TAX_BARCHART_TEXT_2': '<strong>Firmicutes and Bacteroidetes are the two most abundant bacterial phyla in the human gut, but others are also present.</strong> Please see <a href = "#phyla">Major Bacterial Phyla</a> below for basic descriptions of these phyla.',
'ABUNDANT': 'Abundant Microbes',
'YOUR_ABUNDANT': 'Your most abundant microbes:',
'YOUR_ABUNDANT_TABLE_HEADER': '<th>Taxonomy</th><th>Sample</th>',
'OBSERVED_TAXON_1': '<td class = \'taxa\'>Family Prevotella</td><td class = \'row\'>24.9%</td>',
'OBSERVED_TAXON_2': '<td class = \'taxa\'>Family Ruminococcaceae</td><td class = \'row\'>13.4%</td>',
'OBSERVED_TAXON_3': '<td class = \'taxa\'>Family Lachnospiraceae</td><td class = \'row\'>10.1%</td>',
'OBSERVED_TAXON_4': '<td class = \'taxa\'>Genus <em>Bacteroides</em></td><td class = \'row\'>8.1%</td>',
'TAX_BARCHART_EXP': 'The first table shows the four most abundant groups of microbes in your sample. Although you had other bacteria, these are the ones that you have the most of. The percentages on the right (under "Sample") tell you what percent of all of your bacteria belong to these taxa.',
'ENRICHED': 'Enriched Microbes',
'YOUR_ENRICHED': 'Your most enriched microbes:',
'YOUR_ENRICHED_TABLE_HEADER': '<th>Taxonomy</th><th>Sample</th><th>Population</th><th>Fold</th>',
'YOUR_ENRICHED_1': '<td class = \'taxa\'>Genus <em>Clostridium</em></td><td class = \'row\'>2.5%</td><td class = \'row\'>0.3%</td><td class = \'row\'>7x</td>',
'YOUR_ENRICHED_2': '<td class = \'taxa\'>Genus <em>Finegoldia</em></td><td class = \'row\'>0.7%</td><td class = \'row\'>0.0%</td><td class = \'row\'>17x</td>',
'YOUR_ENRICHED_3': '<td class = \'taxa\'>Genus <em>Prevotella</em></td><td class = \'row\'>24.9%</td><td class = \'row\'>2.6%</td><td class = \'row\'>9x</td>',
'YOUR_ENRICHED_4': '<td class = \'taxa\'>Genus <em>Collinsella</em></td><td class = \'row\'>0.9%</td><td class = \'row\'>0.1%</td><td class = \'row\'>8x</td>',
'ENRICHED_EXP_1': 'The second table shows four microbes that you had more of compared to other people. It is likely that other participants also have these microbes in their sample, but we found substantially higher abundances of them in your sample relative to others. The percentages on the right tell you how many of your total bacteria (under "Sample") or of the total bacteria in an average person\'s sample (under "Population") belong to these taxa. Since you have more of these bacteria than most other people, the percentage under "Sample" should be higher than the percentage under "Population".',
'ENRICHED_EXP_2': 'The fold change tells you how many more of these bacteria you have than the average participant. For example, if you have 20% Bacteria A and the average person in the population has 10% Bacteria A, you have twice as many Bacteria A. This would be a twofold (2x) difference. Please note that because the percentages we report on this sheet are rounded (e.g., 0.05% rounded to 0.1%), and your fold differences are calculated from values that are not rounded, the fold differences you see may differ slightly from what you would calculate based on the numbers you see.',
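# A worked example of the fold-change arithmetic described in ENRICHED_EXP_2
# above, using only the illustrative numbers already shown in the
# 'YOUR_ENRICHED_*' rows (fold = sample percentage / population percentage,
# computed before rounding):
#   Prevotella:  24.9% / 2.6% ~= 9.6, reported as "9x"
#   Finegoldia:  a reported "17x" at 0.7% implies an unrounded population value
#                near 0.04%, which is displayed as the rounded "0.0%"
#   Clostridium: the rounded values give 2.5% / 0.3% ~= 8.3, yet the reported
#                fold is "7x" -- the gap comes from using unrounded values
# This is exactly the rounding caveat the paragraph warns about.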
'RARE': 'Rare Taxa',
'RARE_TEXT_1': 'This sample included the following rare taxa: Genus <em>Varibaculum</em>, Genus <em>Neisseria</em>, Genus <em>Campylobacter</em>, Unclassified Order ML615J-28.',
'RARE_TEXT_2': 'This line shows four microbes that you have that are not commonly found in the type of sample you provided. Some other people may have them, but most people do not.',
'YOUR_COMPARE': 'How do your gut microbes compare to others?',
'COMPARE_TEXT_1': 'Here, we present three Principal Coordinates Plots. Each point on these plots represents the bacterial composition of one sample from one person. We take all of the information about the abundances of all the bacteria in each sample and compare them to each other using this type of plot. When two points are very close to each other, it means that the types of bacteria in those two samples are very similar. Points that are farther apart represent samples that are less similar to each other. The axes mean nothing in this context. It doesn\'t matter how high or low a point is on the plot. The only thing that matters is how close it is to other points.',
'COMPARE_TEXT_2': 'The large point represents your sample on each plot. This allows you to see how similar (close to) or different (far from) your sample is from others.',
'DIFFERENT_BODY_SITES': 'Different Body Sites',
'DIFFERENT_BODY_SITES_ALT': 'PCoA by body site for AGP and HMP',
'DIFFERENT_BODY_SITES_TEXT': 'This plot lets you compare your sample to samples collected in other microbiome projects from several body sites. The color of each point tells you which project and body site the sample came from. HMP refers to the <a href = \'http://www.hmpdacc.org\'>Human Microbiome Project</a>, funded by the National Institutes of Health. You can see how your sample compared to fecal, oral, and skin samples from the Human Microbiome Project, as well as to fecal, oral, and skin samples from the American Gut Project, the Global Gut Project, and the Personal Genome Project. These samples have been combined in any category not labeled "HMP". The oval around each group of points shows you where an average sample from each project and body site should fall on the plot. These sometimes make it easier to see the patterns all the clusters of points make.',
'DIFFERENT_AGES_POPS': 'Different Ages and Populations',
'DIFFERENT_AGES_POPS_ALT': 'PCoA of international populations colored by age',
'DIFFERENT_AGES_POPS_TEXT': 'This plot lets you compare your sample to other fecal microbiome samples according to age and place of origin. The color of each point indicates the age of the person the sample was collected from, with red being the youngest and purple being the oldest. Also, on this plot, the ovals show where in the world each sample came from. The red oval shows you the area where an average sample from a Western country should fall. The yellow oval shows you where an average sample from an Amerindian population in Venezuela should fall. The blue oval shows you where an average sample from Malawi should fall. These data are from <a href = \'http://www.nature.com/nature/journal/v486/n7402/abs/nature11053.html\'>Yatsunenko et al. 2012</a>. We used these populations as a comparison to your sample since a large number of people with diverse ages were sampled in these populations. We have fewer data from other populations in other parts of the world.',
'AG_POPULATION': 'The American Gut Population',
'AG_POPULATION_ALT': 'PCoA of American Gut population colored by Firmicutes',
'AG_POPULATION_TEXT': 'This plot lets you compare your sample to other fecal microbiome samples we collected from American Gut participants. The color indicates the relative abundance of Firmicutes bacteria each sample had with red being the lowest and purple being the highest. If you had a lot of Firmicutes bacteria, then your sample should be purple, and you can look for other purple samples to see how similar your whole bacterial community is to other people with high amounts of Firmicutes. As in the other plots, the location of the point along the axes means nothing. Only its relative position compared to the other points is meaningful.',
'MAJOR_PHYLA': 'Major Bacterial Phyla',
'MAJOR_PHYLA_FIRMICUTES_HEADER': 'Firmicutes',
'MAJOR_PHYLA_FIRMICUTES_TEXT': 'A phylum of bacteria whose cell wall structure generally stains Gram-positive (retains crystal violet dye). The name is derived from the Latin <em>firmus</em> for strong and <em>cutis</em> for skin. The cells are in the form of spheres called cocci (singular coccus) or rods called bacilli (singular bacillus). Firmicutes encompass bacteria that can be found in many different environments ranging from soil to wine to your gut. There are currently more than 274 genera representing 7 different classes, of which Clostridia (anaerobes - no oxygen) and Bacilli (obligate or facultative aerobes) are the most significant. Both classes are predominantly saprophytic (getting nourishment from dead or decaying organic matter), playing an important role in the decomposition and nutrient mineralization processes, but also contain a few human pathogens (e.g. <em>Clostridium tetani</em> or <em>Bacillus anthracis</em>).',
'MAJOR_PHYLA_BACTEROIDETES_HEADER': 'Bacteroidetes',
'MAJOR_PHYLA_BACTEROIDETES_TEXT': 'A phylum of Gram-negative, rod-shaped bacteria present in all sorts of environments such as soil, sediments, and fresh and marine waters. Most are saprophytic and involved in carbon cycling. They are often abundant in nutrient-rich habitats and so are a major component of animal guts, where they can act as degraders of complex carbohydrates and proteins but also as pathogens. Their representatives are organized within 4 major classes, among which the genus <em>Bacteroides</em> in the class Bacteroidia is the most prevalent and the most studied. Bacteroidetes together with Firmicutes make up the majority of gut bacteria. The ratio of these two types of bacteria (specifically the dominance of Firmicutes over Bacteroidetes) may be linked to obesity.',
'MAJOR_PHYLA_PROTEOBACTERIA_HEADER': 'Proteobacteria',
'MAJOR_PHYLA_PROTEOBACTERIA_TEXT': 'A phylum of Gram-negative bacteria. They are named after the Greek god Proteus to illustrate their variety of forms. They are organized in 6 recognized classes and represent all types of metabolisms ranging from heterotrophic to photosynthetic to chemoautotrophic. They include many well-known pathogens (e.g., <em>Escherichia</em>, <em>Helicobacter</em>, <em>Salmonella</em>, <em>Vibrio</em>) as well as free-living types that can fix nitrogen (convert nitrogen present in the atmosphere into ammonia, a form of nitrogen available for plants\' uptake).',
'MAJOR_PHYLA_ACTINOBACTERIA_HEADER': 'Actinobacteria',
'MAJOR_PHYLA_ACTINOBACTERIA_TEXT': 'A phylum of Gram-positive bacteria, both terrestrial and aquatic. They are mostly recognized as excellent decomposers of resilient organic compounds such as cellulose or chitin. Although some can be plant and animal pathogens, others are better known as producers of antibiotics (e.g. Streptomyces). In body form, many resemble fungi, forming mycelium-like filaments.',
'MAJOR_PHYLA_VERRUCOMICROBIA_HEADER': 'Verrucomicrobia',
'MAJOR_PHYLA_VERRUCOMICROBIA_TEXT': 'A relatively new phylum with only a handful of described species. Although not the most abundant, they seem to be always present in soil, aquatic environments, and feces. Most likely they are involved in the decomposition of organic matter, with no known pathogens. While some may be autotrophs, others can be internal symbionts of microscopic eukaryotes such as protists or nematodes. Their name is derived from a wart-like appearance (<em>verruca</em> means wart) but they do not cause warts.',
'MAJOR_PHYLA_TENERICUTES_HEADER': 'Tenericutes',
'MAJOR_PHYLA_TENERICUTES_TEXT': 'A phylum of Gram-negative bacteria without a cell wall (<em>tener</em> - soft, <em>cutis</em> - skin), organized in a single class. Nutritionally, they represent variable pathways ranging from aerobic and anaerobic fermenters to commensals to strict pathogens of vertebrates (e.g., fish, cattle, wildlife). Among the best studied are the Mycoplasmas, whose colonies have a fried egg-like shape; <em>Mycoplasma pneumoniae</em> is one of the best-known human pathogens, causing pneumonia, bronchitis, and other respiratory conditions.',
'MAJOR_PHYLA_CYANOBACTERIA_HEADER': 'Cyanobacteria',
'MAJOR_PHYLA_CYANOBACTERIA_TEXT': 'A phylum of photosynthetic (plant-like) bacteria. The name comes from their blue pigment (in Greek <em>kyanos</em> - blue). They can grow as single cells or form filamentous colonies. They are extremely successful in every imaginable environment, including places where other organisms are severely limited, such as hot springs or bare Antarctic rocks. Through their incredible diversity and abundance, they contribute significantly to the global oxygen cycle.',
'MAJOR_PHYLA_FUSOBACTERIA_HEADER': 'Fusobacteria',
'MAJOR_PHYLA_FUSOBACTERIA_TEXT': 'A phylum of rod-shaped Gram-negative bacteria. Known primarily as fermentative species but some can be pathogens. Can occur in anoxic (no oxygen) sediments as well as intestinal habitats of animals including humans.',
'CONTRIB': 'Contributors',
'SUPPORTERS': 'Supporters',
'SPONSORS': 'Sponsors',
'COLLABORATORS': 'Collaborators'
}
_PORTAL = {
'GREETING': 'Hi %(user_name)s! Please follow the steps below.',
'VERIFY_TAB': 'Verify Your Kit',
'ADD_SOURCE_TAB': 'Add Source <br>Survey',
'TAKE_SAMPLE_TAB': 'Take a Sample',
'LOG_SAMPLE_TAB': 'Log a Sample',
'MAIL_TAB': 'Mail Samples <br>to Us',
'SEQ_TAB': 'Sequencing &<br>Results',
'VERIFICATION_HEADER_1': 'Verification',
'VERIFICATION_TEXT_1': 'We ask you to verify that you received the correct sample tubes and kit. Using a <strong>Verification Code</strong> helps us ensure that you receive the correct barcodes and Credentials Sheet.',
'VERIFICATION_TEXT_2': 'Your <strong>Verification Code</strong> will be sent to you via email to the address that you entered when you made your donation; if you made an anonymous donation, please <a href="%(sitebase)s/authed/help_request/">contact us directly</a>.' % {'sitebase': media_locale['SITEBASE']},
'VERIFICATION_TEXT_3': 'If you cannot find your <strong>Verification Code</strong>, please make sure to check your spam folder. If you still cannot find the code, please <a href="%(sitebase)s/authed/help_request/">contact us</a>.' % {'sitebase': media_locale['SITEBASE']},
'VERIFICATION_HEADER_2': 'Verify your identity and kit barcode(s)',
'VERIFICATION_CODE_PROMPT': 'Please enter the verification code sent to your email address <a href="#" class="help" title="If you did not receive a verification code in your email from American Gut, please check your spam folder. If you still cannot find it, contact %(help_email)s">(?)</a>' % {"help_email": media_locale["HELP_EMAIL"]},
'VERIFICATION_CODE_ERROR': 'The kit verification code you entered does not match our records. Please double-check the code you entered. If you continue to experience difficulties, please <a href="/authed/help_request/">contact us</a>.',
'VERIFY_BARCODES': 'Please verify that the barcode(s) you received in the mail match the barcode(s) here',
'VERIFY_BARCODES_POPUP': 'The barcode you need to verify is located on the outside of your sample tube.',
'SAMPLE_SOURCE_HEADER_1': 'Sample Source',
'SAMPLE_SOURCE_TEXT_1': 'There are three different sample sources that you can choose from for the %(project)s. The sources are human, animal and environmental. The buttons below will allow you to add a new sample source.',
'SAMPLE_SOURCE_TEXT_2': 'If you add a <strong>human</strong> or <strong>animal</strong> source, you will be asked to complete a survey',
'SAMPLE_SOURCE_TYPE_HUMAN': 'Human',
'SAMPLE_SOURCE_TYPE_ANIMAL': 'Animal',
'SAMPLE_SOURCE_TYPE_ENVIRONMENTAL': 'Environmental',
'SURVEY_HEADER_1': 'Survey',
'SURVEY_TEXT_1': 'If you are taking a human or animal sample, we ask that you complete a survey.',
'SURVEY_TEXT_2': 'The survey will take <strong>30-45 minutes</strong> for a human subject, or <strong>10 minutes</strong> for an animal subject. You <strong>cannot</strong> save in the middle of the survey, so please set aside enough time to complete the entire survey.',
'SURVEY_TEXT_3': 'If you are taking a human sample, the survey includes demographic, lifestyle, medical and diet questions. All survey questions are optional.',
'SURVEY_TEXT_4': 'The diet questions do not require a food diary, but please be prepared to answer questions about your eating habits. A screenshot of the dietary questions is shown below.',
'SAMPLE_STEPS_HEADER_1': 'Before Taking Your Samples',
'SAMPLE_STEPS_TEXT_1': 'These are the steps involved in taking a sample:',
'SAMPLE_STEPS_TEXT_2': '<li>Make sure you have <a href="#" onclick="selectTab(\'source\')">added your sample source and completed the required survey(s)</a></li><li>Remove the sample swabs from the sample tube</li><li>Collect your sample following the guidelines below</li><li>Place sample swabs into the sample tube</li>',
'SAMPLE_STEPS_TEXT_3': 'These sample collection instructions are very important, please read through them <strong>before</strong> beginning to take your sample. Deviations will cause issues with sample processing, sequencing, and data analysis. We cannot guarantee that we will be able to process your sample if the instructions are not followed, and <strong>we cannot offer replacements if instructions were not followed</strong>. Please do not hesitate to ask us questions at <a href="%(sitebase)s/authed/help_request/">%(help_email)s</a>.' % {"help_email": media_locale["HELP_EMAIL"], 'sitebase': media_locale['SITEBASE']},
'SAMPLE_STEPS_HEADER_2': 'Taking Your Samples',
'SAMPLE_STEPS_TEXT_4': 'Once you have removed the sample tube, only handle the sample swab by the red cap.',
'SAMPLE_STEPS_TEXT_5': 'For a <strong>fecal sample</strong>, rub both cotton tips on a fecal specimen (a used piece of bathroom tissue). Collect a small amount of biomass. Maximum collection would be to saturate 1/2 a swab. <strong>More is not better!</strong> The ideal amount of biomass collected is shown below.',
'SAMPLE_STEPS_TEXT_6': 'For an <strong>oral sample</strong>, firmly rub both sides of both cotton tips on the surface of the tongue for 20 seconds. Take great caution not to touch the cheeks, teeth, or lips.',
'SAMPLE_STEPS_TEXT_7': 'For a <strong>skin sample</strong>, firmly rub both sides of both cotton tips over the skin surface being sampled for 20 seconds.',
'SAMPLE_STEPS_TEXT_8': 'For an <strong>other/environmental sample</strong>, firmly rub both sides of both cotton tips over the surface being sampled for 20 seconds.',
'SAMPLE_STEPS_TEXT_9': 'After you have finished taking your sample, return the swabs to the sample tube and push the red cap on firmly.',
'LOG_SAMPLE_HEADER_1': 'Logging Samples',
'LOG_SAMPLE_TEXT_1': 'Please write the sample site, date, and time on the sampling tube.',
'LOG_SAMPLE_TEXT_2': 'After writing the information on the sampling tube, <a href="%(sitebase)s/authed/add_sample_overview/">log the sample</a> in our system.' % {'sitebase': media_locale['SITEBASE']},
'MAILING_HEADER_1': 'Mailing samples',
'MAILING_TEXT_1': 'Once you have added a <a href="#" onclick="selectTab(\'source\')">sample source, completed the relevant survey</a> (if applicable), <a href="#" onclick="selectTab(\'sample\')">taken</a> and <a href="#" onclick="selectTab(\'log\')">logged your samples</a>, you should then mail the samples back to us.',
'MAILING_TEXT_2': 'Wrap the sample tube in absorbent tissue, such as facial tissue or paper towels, and mail it back as soon as possible. The absorbent tissue will help to keep the relative humidity within the package low.',
'MAILING_TEXT_3': 'We also recommend using a reinforced envelope to reduce the chance of losing your sample due to damaged packaging.',
'MAILING_TEXT_4': 'The sooner we receive your sample, the sooner we can get it stored in our -80C freezers and ready for processing!',
'MAILING_TEXT_5': '<strong>Do not refrigerate or freeze the samples</strong> if they cannot be shipped immediately. Store them in a cool dry place such as a cabinet or a closet.',
'DOMESTIC_HEADER_1': 'Domestic Shipping',
'DOMESTIC_TEXT_1': 'Shipping within the US should be less than $1.50, but we recommend taking the sample to the post office to get the proper postage. Getting the postage right on the first try is important since samples that spend a long time in transit will likely not produce the highest quality results.',
'DOMESTIC_TEXT_2': 'This is the shipping address:',
'DOMESTIC_TEXT_3': media_locale['SHIPPING_ADDRESS'],
'INTERNATIONAL_HEADER_1': 'International Shipping',
'INTERNATIONAL_TEXT_1': 'In order to comply with amended federal and IATA regulations, we are requesting that international participants return their sample tubes through FedEx International and that international participants follow the additional safety requirements for shipping human swab samples to the United States. Your airway bill must clearly identify the package as containing "human exempt specimens". The samples will additionally need to be packaged within a secondary containment to ensure that they can safely enter the United States.',
'INTERNATIONAL_TEXT_2': 'For shipment, you will need to use clear tape to secure the sample swabs to the sample tube, then place the sample tube in the provided buff mailing envelope. Before handing the package over for FedEx shipment, place the buff envelope inside a Tyvek/plastic mailer, <strong>which can be acquired free of charge from FedEx</strong>.',
'INTERNATIONAL_TEXT_3': 'If you do not follow these directions, the sample will be destroyed by United States Customs at the port of entry into the United States.',
'INTERNATIONAL_HEADER_2': 'Your samples',
'INTERNATIONAL_TEXT_4': '<li>Are considered dried specimens</li><li>Must be shipped via FedEx</li><li>Must have tape sealing the plastic tube that contains the swab</li><li>Must be placed in a buff mailing envelope with the buff envelope placed inside a Tyvek/plastic mailer prior to FedEx shipment</li><li>Must be shipped with an airway bill and must be labeled with the complete address of the sender and the complete address of the recipient, and with the words "Human exempt sample(s)"</li>',
'RESULTS_HEADER_1': 'Sequencing & Results',
'RESULTS_TEXT_1': 'Once you have added a <a href="#" onclick="selectTab(\'source\')">sample source, completed the relevant survey</a> (if applicable), <a href="#" onclick="selectTab(\'sample\')">taken</a> and <a href="#" onclick="selectTab(\'log\')">logged your samples</a> and you have <a href="#" onclick="selectTab(\'mail\')">mailed the samples back to us</a>, we will then perform sequencing and analysis on your samples.',
'RESULTS_TEXT_2': 'Sequencing and data analysis can take up to 6 months, please be patient! We will let you know as soon as your samples have been sequenced and analyzed.',
'RESULTS_READY_HEADER_1': 'Your results are ready!',
'RESULTS_READY_TEXT_1': 'One or more of the samples you submitted have been sequenced, and the results are now available online! Currently, we have only processed fecal samples, but we will be processing samples from other body sites soon.',
'RESULTS_READY_TEXT_2': 'To access your available results, hover over "Human Samples" in the menu on the left, hover over your name, then click on your sample to view your results, or click one of the links below. The following barcodes are ready:',
'RESULTS_READY_TEXT_3': 'You will be able to view your results here on this website once they are available.'
}
_CHANGE_PASS_VERIFY = {
'TITLE': 'Please enter new password',
'NEW_PASSWORD': 'New Password',
'HELP_NEW_PASSWORD': 'The new password you would like to use to log in from now on.',
'CONFIRM_PASSWORD': 'Confirm Password',
'HELP_CONFIRM_PASSWORD': "Repeat your New Password again, exactly as before. We ask you to repeat it here so that you don't accidentally change your password to something you did not intend.",
'BUTTON_TEXT': 'Change Password',
'NO_VALID_CODE': 'Your password change code is not valid. If you wish to change your password please <a href="%(sitebase)s/forgot_password/">start over</a>' % {'sitebase': media_locale['SITEBASE']},
'SUCCESS': 'Your password has been changed',
'NO_EMAIL_1': 'Could not send Email',
'NO_EMAIL_2': 'We attempted to email the message below:',
'NO_EMAIL_3': 'This is a courtesy email to confirm that you have changed your password for your kit with ID %(kitid)s. If you did not request this change, please email us immediately at %(help_email)s.'
}
# helper tuples for the survey questions
_NO_RESPONSE_CHOICE = "Unspecified"
_YES_NO_CHOICES = (_NO_RESPONSE_CHOICE, 'Yes', 'No')
_YES_NO_NOTSURE_CHOICES = (_NO_RESPONSE_CHOICE, 'Yes', 'No', 'Not sure')
_FREQUENCY_MONTH_CHOICES = (_NO_RESPONSE_CHOICE,
'Never',
'Rarely (a few times/month)',
'Occasionally (1-2 times/week)',
'Regularly (3-5 times/week)',
'Daily')
_FREQUENCY_WEEK_CHOICES = (_NO_RESPONSE_CHOICE,
'Never',
'Rarely (less than once/week)',
'Occasionally (1-2 times/week)',
'Regularly (3-5 times/week)',
'Daily')
_DIAGNOSIS_CHOICE = (_NO_RESPONSE_CHOICE,
'I do not have this condition',
'Diagnosed by a medical professional (doctor, physician assistant)',
'Diagnosed by an alternative medicine practitioner',
'Self-diagnosed')
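# Illustrative sketch only (not part of the original locale module): one way a
# template helper might turn a choices tuple such as _YES_NO_CHOICES above into
# HTML <option> tags. The helper name `choices_to_options` and its `selected`
# parameter are hypothetical assumptions, not the project's actual API.
def choices_to_options(choices, selected=None):
    # Build one <option> element per choice, marking the selected value if given.
    options = []
    for choice in choices:
        sel = ' selected' if choice == selected else ''
        options.append('<option value="%s"%s>%s</option>' % (choice, sel, choice))
    return '\n'.join(options)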
_ANIMAL_SURVEY = {
'GENERAL_TITLE': 'General',
'GENERAL_QUESTION_1': 'Animal type?',
'GENERAL_QUESTION_1_CHOICES': (_NO_RESPONSE_CHOICE,
'Dog',
'Cat',
'Small mammal',
'Large mammal',
'Fish',
'Bird',
'Reptile',
'Amphibian',
'Other'),
'GENERAL_QUESTION_2': 'Origin?',
'GENERAL_QUESTION_2_CHOICES': (_NO_RESPONSE_CHOICE,
'Breeder',
'Shelter',
'Home',
'Wild'),
'GENERAL_QUESTION_3': 'Age?',
'GENERAL_QUESTION_3_CHOICES': None,
'GENERAL_QUESTION_4': 'Gender?',
'GENERAL_QUESTION_4_CHOICES': (_NO_RESPONSE_CHOICE,
'Male',
'Female',
'Unknown'),
'GENERAL_QUESTION_5': 'Setting?',
'GENERAL_QUESTION_5_CHOICES': (_NO_RESPONSE_CHOICE,
'Urban',
'Suburban',
'Rural'),
'GENERAL_QUESTION_6': 'Weight category?',
'GENERAL_QUESTION_6_CHOICES': (_NO_RESPONSE_CHOICE,
'Underweight',
'Skinny',
'Normal',
'Chubby',
'Overweight'),
'GENERAL_QUESTION_7': 'Diet classification?',
'GENERAL_QUESTION_7_CHOICES': (_NO_RESPONSE_CHOICE,
'Carnivore',
'Omnivore',
'Herbivore'),
'GENERAL_QUESTION_8': 'Food source?',
'GENERAL_QUESTION_8_CHOICES': (_NO_RESPONSE_CHOICE,
'Pet store food',
'Human food',
'Wild food'),
'GENERAL_QUESTION_9': 'Food type?',
'GENERAL_QUESTION_9_CHOICES': (_NO_RESPONSE_CHOICE,
'dry',
'wet',
'both'),
'GENERAL_QUESTION_10': 'Food special attributes?',
'GENERAL_QUESTION_10_CHOICES': (_NO_RESPONSE_CHOICE,
'Organic',
'Grain free'),
'GENERAL_QUESTION_11': 'Social?',
'GENERAL_QUESTION_11_CHOICES': (_NO_RESPONSE_CHOICE,
'Lives alone with humans',
'Lives alone no/limited humans (shelter)',
'Lives with other animals and humans',
'Lives with other animals/limited humans'),
'GENERAL_QUESTION_12': 'Any pets the current animal lives with?',
'GENERAL_QUESTION_12_CHOICES': None,
'GENERAL_QUESTION_13': 'Add the age of any human that the current animal lives with',
'GENERAL_QUESTION_13_CHOICES': None,
'GENERAL_QUESTION_14': 'Add the gender of any human that the current animal lives with',
'GENERAL_QUESTION_14_CHOICES': (_NO_RESPONSE_CHOICE,
'Male',
'Female',
'Other'),
'GENERAL_QUESTION_15': 'Hours spent outside?',
'GENERAL_QUESTION_15_CHOICES': (_NO_RESPONSE_CHOICE,
'None',
'Less than 2',
'2-4',
'4-8',
'8+'),
'GENERAL_QUESTION_16': 'Toilet water access?',
'GENERAL_QUESTION_16_CHOICES': (_NO_RESPONSE_CHOICE,
'Regular',
'Sometimes',
'Never'),
'GENERAL_QUESTION_17': 'Coprophage?',
'GENERAL_QUESTION_17_CHOICES': (_NO_RESPONSE_CHOICE,
'High',
'Moderate',
'Low',
'Never'),
'SUPPLEMENTAL_COMMENTS': 'Please write anything else about this animal that you think might affect its microorganisms.'
}
_HUMAN_SURVEY_COMPLETED = {
'COMPLETED_HEADER': 'Congratulations!',
'COMPLETED_TEXT': 'You are now an enrolled participant in the %(PROJECT_TITLE)s!' % media_locale,
'AVAILABLE_SURVEYS': 'Below are a few additional surveys that you may be interested in completing. There is no requirement to take these surveys, and your decision does not affect your involvement in the project in any way.',
'SURVEY_ASD': '<a href="http://www.anl.gov/contributors/jack-gilbert">Dr. Jack Gilbert</a> is exploring the relationship between gut dysbiosis and Autism Spectrum Disorders, and in conjunction with the American Gut Project, we started an ASD-Cohort study. This additional survey contains questions specific to that cohort, but it is open to any participant to take if they so choose. Please click <a href="%s">here</a> to take the ASD-Cohort survey.',
'SURVEY_VIOSCREEN': 'The American Gut Project and its sister projects are very interested in diet. If you\'d like to provide additional detail about your diet, please click <a href="%s">here</a> to take a detailed diet survey (known as a Food Frequency Questionnaire). This is a validated FFQ, and is the one used by the Mayo Clinic.'
}
# sourced from 12-0582_-_american_gut_questionnaire_amended_09012014__irb_appd_09.19.14
_HUMAN_SURVEY = {
# Personal information
'PERSONAL_PROMPT_TITLE': 'Personal information',
'PERSONAL_PROMPT_NAME': 'Name:',
'PERSONAL_PROMPT_GENDER': 'Gender:',
'PERSONAL_PROMPT_HEIGHT': 'Height:',
'PERSONAL_PROMPT_COUNTRY_OF_BIRTH': 'Country of birth:',
'PERSONAL_PROMPT_TODAYSDATE': 'Today\'s date:',
'PERSONAL_PROMPT_BIRTHDATE_MONTH': 'Birth month:',
'PERSONAL_PROMPT_BIRTHDATE_YEAR': 'Birth year:',
'PERSONAL_PROMPT_WEIGHT': 'Weight:',
'PERSONAL_PROMPT_ZIP': 'Current ZIP code:',
'PERSONAL_PROMPT_WEIGHT_UNITS': 'Weight units:',
'PERSONAL_PROMPT_HEIGHT_UNITS': 'Height units:',
# General diet information
'GENERAL_DIET_TITLE': 'General Diet Information',
'GENERAL_DIET_QUESTION_0': 'How would you classify your diet?',
'GENERAL_DIET_QUESTION_0_CHOICES': (_NO_RESPONSE_CHOICE,
'Omnivore',
'Omnivore but do not eat red meat',
'Vegetarian',
'Vegetarian but eat seafood',
'Vegan'),
'GENERAL_DIET_QUESTION_1': 'Are you taking a daily multivitamin?',
'GENERAL_DIET_QUESTION_1_CHOICES': _YES_NO_CHOICES,
'GENERAL_DIET_QUESTION_2': 'How frequently do you take a probiotic?',
'GENERAL_DIET_QUESTION_2_CHOICES': _FREQUENCY_MONTH_CHOICES,
'GENERAL_DIET_QUESTION_3': 'How frequently do you take Vitamin B complex, folate or folic acid?',
'GENERAL_DIET_QUESTION_3_CHOICES': _FREQUENCY_MONTH_CHOICES,
'GENERAL_DIET_QUESTION_4': 'How frequently do you take Vitamin D supplement?',
'GENERAL_DIET_QUESTION_4_CHOICES': _FREQUENCY_MONTH_CHOICES,
'GENERAL_DIET_QUESTION_5': 'Are you taking any other nutritional/herbal supplements?',
'GENERAL_DIET_QUESTION_5_CHOICES': _YES_NO_CHOICES,
'GENERAL_DIET_QUESTION_6': 'Are you lactose intolerant?',
'GENERAL_DIET_QUESTION_6_CHOICES': _YES_NO_CHOICES,
'GENERAL_DIET_QUESTION_7': 'Are you gluten intolerant?',
'GENERAL_DIET_QUESTION_7_CHOICES': (_NO_RESPONSE_CHOICE,
'I was diagnosed with celiac disease',
'I was diagnosed with gluten allergy (anti-gluten IgG), but not celiac disease',
'I do not eat gluten because it makes me feel bad',
'No'),
'GENERAL_DIET_QUESTION_8': 'I am allergic to __________ (mark all that apply)',
'GENERAL_DIET_QUESTION_8_CHOICES': (_NO_RESPONSE_CHOICE,
'Peanuts',
'Tree nuts',
'Shellfish',
'Other',
'I have no food allergies that I know of.'),
'GENERAL_DIET_QUESTION_9': 'Do you eat a paleo, modified paleo, primal, FODMAP, Weston-Price, or other low-grain, low processed food diet?',
'GENERAL_DIET_QUESTION_9_CHOICES': _YES_NO_CHOICES,
'GENERAL_DIET_QUESTION_10': 'Do you eat meat/dairy products from animals treated with antibiotics?',
'GENERAL_DIET_QUESTION_10_CHOICES': _YES_NO_NOTSURE_CHOICES,
'GENERAL_DIET_QUESTION_11': 'Do you follow any other special diet restrictions other than those indicated above?',
'GENERAL_DIET_QUESTION_11_CHOICES': _YES_NO_CHOICES,
'GENERAL_DIET_QUESTION_12': 'What is your drinking water source at home?',
'GENERAL_DIET_QUESTION_12_CHOICES': (_NO_RESPONSE_CHOICE,
'City',
'Well',
'Bottled',
'Filtered',
'Not sure'),
# General information
'GENERAL_TITLE': 'General Information',
'GENERAL_QUESTION_13': 'What is your race/ethnicity?',
'GENERAL_QUESTION_13_CHOICES': (_NO_RESPONSE_CHOICE,
'Caucasian',
'Asian or Pacific Islander',
'African American',
'Hispanic',
'Other'),
'GENERAL_QUESTION_14': 'When did you move to your current state of residence?',
'GENERAL_QUESTION_14_CHOICES': (_NO_RESPONSE_CHOICE,
'Within the past month',
'Within the past 3 months',
'Within the past 6 months',
'Within the past year',
'I have lived in my current state of residence for more than a year.'),
'GENERAL_QUESTION_15': 'I have traveled outside of my country of residence in the past _________.',
'GENERAL_QUESTION_15_CHOICES': (_NO_RESPONSE_CHOICE,
'Month',
'3 months',
'6 months',
'1 year',
'I have not been outside of the United States in the past year.'),
'GENERAL_QUESTION_16': 'How many non-family roommates do you have?',
'GENERAL_QUESTION_16_CHOICES': (_NO_RESPONSE_CHOICE,
'None',
'One',
'Two',
'Three',
'More than three'),
'GENERAL_QUESTION_17': 'Are any of your roommates participating in this study?',
'GENERAL_QUESTION_17_CHOICES': _YES_NO_NOTSURE_CHOICES,
'GENERAL_QUESTION_18': 'Are you related to or live with any of the other participants in this study?',
'GENERAL_QUESTION_18_CHOICES': _YES_NO_NOTSURE_CHOICES,
'GENERAL_QUESTION_19': 'Do you have a dog(s)?',
'GENERAL_QUESTION_19_CHOICES': _YES_NO_CHOICES,
'GENERAL_QUESTION_20': 'Do you have a cat(s)?',
'GENERAL_QUESTION_20_CHOICES': _YES_NO_CHOICES,
'GENERAL_QUESTION_21': 'Which is your dominant hand?',
'GENERAL_QUESTION_21_CHOICES': (_NO_RESPONSE_CHOICE,
'I am right handed',
'I am left handed',
'I am ambidextrous'),
'GENERAL_QUESTION_22': 'What is your highest level of education?',
'GENERAL_QUESTION_22_CHOICES': (_NO_RESPONSE_CHOICE,
'Did not complete high school',
'High School or GED equivalent',
'Some college or technical school',
'Associate\'s degree',
'Bachelor\'s degree',
'Some graduate school or professional',
'Graduate or Professional degree'),
# General lifestyle and hygiene information
'LIFESTYLE_HYGIENE_TITLE': 'General Lifestyle and Hygiene Information',
'LIFESTYLE_HYGIENE_QUESTION_23': 'How often do you exercise?',
'LIFESTYLE_HYGIENE_QUESTION_23_CHOICES': _FREQUENCY_MONTH_CHOICES,
'LIFESTYLE_HYGIENE_QUESTION_24': 'Do you generally exercise indoors or outdoors?',
'LIFESTYLE_HYGIENE_QUESTION_24_CHOICES': (_NO_RESPONSE_CHOICE,
'Indoors',
'Outdoors',
'Both',
'Depends on the season',
'None of the above'),
'LIFESTYLE_HYGIENE_QUESTION_25': 'Do you bite your fingernails?',
'LIFESTYLE_HYGIENE_QUESTION_25_CHOICES': _YES_NO_CHOICES,
'LIFESTYLE_HYGIENE_QUESTION_26': 'How often do you use a swimming pool/hot tub?',
'LIFESTYLE_HYGIENE_QUESTION_26_CHOICES': _FREQUENCY_MONTH_CHOICES,
'LIFESTYLE_HYGIENE_QUESTION_27': 'How often do you smoke cigarettes?',
'LIFESTYLE_HYGIENE_QUESTION_27_CHOICES': _FREQUENCY_MONTH_CHOICES,
'LIFESTYLE_HYGIENE_QUESTION_28': 'How often do you drink alcohol?',
'LIFESTYLE_HYGIENE_QUESTION_28_CHOICES': _FREQUENCY_MONTH_CHOICES,
'LIFESTYLE_HYGIENE_QUESTION_29': 'What type(s) of alcohol do you typically consume (select all that apply)?',
'LIFESTYLE_HYGIENE_QUESTION_29_CHOICES': (_NO_RESPONSE_CHOICE,
'Beer/Cider',
'Sour beers',
'White wine',
'Red wine',
'Spirits/hard alcohol'),
'LIFESTYLE_HYGIENE_QUESTION_30': 'How often do you brush your teeth?',
'LIFESTYLE_HYGIENE_QUESTION_30_CHOICES': _FREQUENCY_MONTH_CHOICES,
'LIFESTYLE_HYGIENE_QUESTION_31': 'How often do you floss your teeth?',
'LIFESTYLE_HYGIENE_QUESTION_31_CHOICES': _FREQUENCY_MONTH_CHOICES,
'LIFESTYLE_HYGIENE_QUESTION_32': 'How often do you wear facial cosmetics?',
'LIFESTYLE_HYGIENE_QUESTION_32_CHOICES': _FREQUENCY_MONTH_CHOICES,
'LIFESTYLE_HYGIENE_QUESTION_33': 'Do you use deodorant or antiperspirant (antiperspirants generally contain aluminum)?',
'LIFESTYLE_HYGIENE_QUESTION_33_CHOICES': (_NO_RESPONSE_CHOICE,
'I use deodorant',
'I use an antiperspirant',
'Not sure, but I use some form of deodorant/antiperspirant',
'I do not use deodorant or an antiperspirant'),
'LIFESTYLE_HYGIENE_QUESTION_34': 'Approximately how many hours of sleep do you get in an average night?',
'LIFESTYLE_HYGIENE_QUESTION_34_CHOICES': (_NO_RESPONSE_CHOICE,
'Less than 5 hours',
'5-6 hours',
'6-7 hours',
'7-8 hours',
'8 or more hours'),
'LIFESTYLE_HYGIENE_QUESTION_35': 'Do you use fabric softener when drying your clothes?',
'LIFESTYLE_HYGIENE_QUESTION_35_CHOICES': _YES_NO_CHOICES,
# General health information
'HEALTH_TITLE': 'General Health Information',
'HEALTH_QUESTION_36': 'How many times do you have a bowel movement in an average day?',
'HEALTH_QUESTION_36_CHOICES': (_NO_RESPONSE_CHOICE,
'Less than one',
'One',
'Two',
'Three',
'Four',
'Five or more'),
'HEALTH_QUESTION_37': 'Describe the quality of your bowel movements:',
'HEALTH_QUESTION_37_CHOICES': (_NO_RESPONSE_CHOICE,
'I tend to be constipated (have difficulty passing stool)',
'I tend to have diarrhea (watery stool)',
'I tend to have normal formed stool',
'I don\'t know, I do not have a point of reference'),
'HEALTH_QUESTION_38': 'I have taken antibiotics in the last ____________.',
'HEALTH_QUESTION_38_CHOICES': (_NO_RESPONSE_CHOICE,
'Week',
'Month',
'6 months',
'Year',
'I have not taken antibiotics in the past year.'),
'HEALTH_QUESTION_39': 'I have received a flu vaccine in the last ____________.',
'HEALTH_QUESTION_39_CHOICES': (_NO_RESPONSE_CHOICE,
'Week',
'Month',
'6 months',
'Year',
'I have not gotten the flu vaccine in the past year.'),
'HEALTH_QUESTION_40': 'Are you currently using some form of hormonal birth control?',
'HEALTH_QUESTION_40_CHOICES': (_NO_RESPONSE_CHOICE,
'Yes, I am taking the "pill"',
'Yes, I use an injected contraceptive (DMPA)',
'Yes, I use a contraceptive patch (Ortho-Evra)',
'Yes, I use the NuvaRing',
'Yes, I use a hormonal IUD (Mirena)',
'No'),
'HEALTH_QUESTION_41': 'Are you currently pregnant?',
'HEALTH_QUESTION_41_CHOICES': _YES_NO_NOTSURE_CHOICES,
'HEALTH_QUESTION_42': 'My weight has _________ within the last 6 months.',
'HEALTH_QUESTION_42_CHOICES': (_NO_RESPONSE_CHOICE,
'Increased more than 10 pounds',
'Decreased more than 10 pounds',
'Remained stable'),
'HEALTH_QUESTION_43': 'Have you had your tonsils removed?',
'HEALTH_QUESTION_43_CHOICES': _YES_NO_NOTSURE_CHOICES,
'HEALTH_QUESTION_44': 'Have you had your appendix removed?',
'HEALTH_QUESTION_44_CHOICES': _YES_NO_NOTSURE_CHOICES,
'HEALTH_QUESTION_45': 'Have you had chickenpox?',
'HEALTH_QUESTION_45_CHOICES': _YES_NO_NOTSURE_CHOICES,
'HEALTH_QUESTION_46': 'Do you currently take prescription medication for facial acne?',
'HEALTH_QUESTION_46_CHOICES': _YES_NO_CHOICES,
'HEALTH_QUESTION_47': 'Do you use over the counter products to control facial acne?',
'HEALTH_QUESTION_47_CHOICES': _YES_NO_CHOICES,
'HEALTH_QUESTION_48': 'Do you currently take over the counter or prescription medication for other conditions?',
'HEALTH_QUESTION_48_CHOICES': _YES_NO_CHOICES,
'HEALTH_QUESTION_49': 'Were you born via Caesarean section (C-section)?',
'HEALTH_QUESTION_49_CHOICES': _YES_NO_NOTSURE_CHOICES,
'HEALTH_QUESTION_50': 'How were you fed as an infant?',
'HEALTH_QUESTION_50_CHOICES': (_NO_RESPONSE_CHOICE,
'Primarily breast milk',
'Primarily infant formula',
'A mixture of breast milk and formula',
'Not sure'),
'HEALTH_QUESTION_51.03': 'Have you ever been diagnosed with ADD/ADHD?',
'HEALTH_QUESTION_51.03_CHOICES': _DIAGNOSIS_CHOICE,
'HEALTH_QUESTION_51.04': 'Have you ever been diagnosed with Alzheimer\'s Disease/Dementia?',
'HEALTH_QUESTION_51.04_CHOICES': _DIAGNOSIS_CHOICE,
'HEALTH_QUESTION_51.05': 'Have you ever been diagnosed with Asthma, Cystic Fibrosis or Lung Disease?',
'HEALTH_QUESTION_51.05_CHOICES': _DIAGNOSIS_CHOICE,
'HEALTH_QUESTION_51.06': 'Have you ever been diagnosed with Autism or Autism Spectrum Disorder?',
'HEALTH_QUESTION_51.06_CHOICES': _DIAGNOSIS_CHOICE,
'HEALTH_QUESTION_51.07': 'Have you ever been diagnosed with Autoimmune disease (i.e. Lupus, RA, MS, Hashimoto\'s thyroiditis), not including IBD (inflammatory bowel disease) or type I diabetes?',
'HEALTH_QUESTION_51.07_CHOICES': _DIAGNOSIS_CHOICE,
'HEALTH_QUESTION_51.08': 'Have you ever been diagnosed with Candida or fungal overgrowth in the gut?',
'HEALTH_QUESTION_51.08_CHOICES': _DIAGNOSIS_CHOICE,
'HEALTH_QUESTION_51.09': 'Have you ever been diagnosed with Clostridium difficile (C. diff) infection?',
'HEALTH_QUESTION_51.09_CHOICES': _DIAGNOSIS_CHOICE,
'HEALTH_QUESTION_51.10': 'Have you ever been diagnosed with coronary artery disease, heart disease, heart attack, stroke?',
'HEALTH_QUESTION_51.10_CHOICES': _DIAGNOSIS_CHOICE,
'HEALTH_QUESTION_51.11': 'Have you ever been diagnosed with depression, bipolar disorder or schizophrenia?',
'HEALTH_QUESTION_51.11_CHOICES': _DIAGNOSIS_CHOICE,
'HEALTH_QUESTION_51.12': 'Have you ever been diagnosed with diabetes?',
'HEALTH_QUESTION_51.12_CHOICES': _DIAGNOSIS_CHOICE,
'HEALTH_QUESTION_51.13': 'Have you ever been diagnosed with epilepsy or seizure disorder?',
'HEALTH_QUESTION_51.13_CHOICES': _DIAGNOSIS_CHOICE,
'HEALTH_QUESTION_51.14': 'Have you ever been diagnosed with irritable bowel syndrome (IBS)?',
'HEALTH_QUESTION_51.14_CHOICES': _DIAGNOSIS_CHOICE,
'HEALTH_QUESTION_51.15': 'Have you ever been diagnosed with inflammatory bowel disease (IBD)?',
'HEALTH_QUESTION_51.15_CHOICES': _DIAGNOSIS_CHOICE,
'HEALTH_QUESTION_51.16': 'Have you ever been diagnosed with migraines?',
'HEALTH_QUESTION_51.16_CHOICES': _DIAGNOSIS_CHOICE,
'HEALTH_QUESTION_51.17': 'Have you ever been diagnosed with kidney disease?',
'HEALTH_QUESTION_51.17_CHOICES': _DIAGNOSIS_CHOICE,
'HEALTH_QUESTION_51.18': 'Have you ever been diagnosed with liver disease?',
'HEALTH_QUESTION_51.18_CHOICES': _DIAGNOSIS_CHOICE,
'HEALTH_QUESTION_51.19': 'Have you ever been diagnosed with phenylketonuria?',
'HEALTH_QUESTION_51.19_CHOICES': _DIAGNOSIS_CHOICE,
'HEALTH_QUESTION_51.20': 'Have you ever been diagnosed with small intestinal bacterial overgrowth (SIBO)?',
'HEALTH_QUESTION_51.20_CHOICES': _DIAGNOSIS_CHOICE,
'HEALTH_QUESTION_51.21': 'Have you ever been diagnosed with a skin condition?',
'HEALTH_QUESTION_51.21_CHOICES': _DIAGNOSIS_CHOICE,
'HEALTH_QUESTION_51.22': 'Have you ever been diagnosed with thyroid disease?',
'HEALTH_QUESTION_51.22_CHOICES': _DIAGNOSIS_CHOICE,
'HEALTH_QUESTION_51.23': 'Have you ever been diagnosed with any other relevant condition?',
'HEALTH_QUESTION_51.23_CHOICES': _DIAGNOSIS_CHOICE,
'HEALTH_QUESTION_53': 'Are you willing to be contacted to answer additional questions about the conditions listed above?',
'HEALTH_QUESTION_53_CHOICES': _YES_NO_CHOICES,
'HEALTH_QUESTION_54': 'Do you have seasonal allergies?',
'HEALTH_QUESTION_54_CHOICES': _YES_NO_CHOICES,
'HEALTH_QUESTION_55': 'Do you have any of the following non-food allergies? (mark all that apply)',
'HEALTH_QUESTION_55_CHOICES': (_NO_RESPONSE_CHOICE,
'Drug (e.g. Penicillin)',
'Pet dander',
'Bee stings',
'Poison ivy/oak',
'Sun'),
# Detailed Dietary information
'DETAILED_DIET_TITLE': 'Detailed Dietary information',
'DETAILED_DIET_QUESTION_56': 'Are you an infant who receives most of their nutrition from breast milk or formula, or an adult who receives most (more than 75% of daily calories) of their nutrition from adult nutritional shakes (i.e. Ensure)?',
'DETAILED_DIET_QUESTION_56_CHOICES': (_NO_RESPONSE_CHOICE,
'Yes',
'No',
'I eat both solid food and formula/breast milk'),
'DETAILED_DIET_QUESTION_57': 'In an average week, how often do you consume meat/eggs?',
'DETAILED_DIET_QUESTION_57_CHOICES': _FREQUENCY_WEEK_CHOICES,
'DETAILED_DIET_QUESTION_58': 'In an average week, how often do you cook and consume home cooked meals? (Exclude ready-to-eat meals like boxed macaroni and cheese, ramen noodles, lean cuisine)',
'DETAILED_DIET_QUESTION_58_CHOICES': _FREQUENCY_WEEK_CHOICES,
'DETAILED_DIET_QUESTION_59': 'In an average week, how often do you consume ready-to-eat meals (i.e macaroni and cheese, ramen noodles, lean cuisine)?',
'DETAILED_DIET_QUESTION_59_CHOICES': _FREQUENCY_WEEK_CHOICES,
'DETAILED_DIET_QUESTION_60': 'In an average week, how often do you eat food prepared at a restaurant, including carry-out/take-out?',
'DETAILED_DIET_QUESTION_60_CHOICES': _FREQUENCY_WEEK_CHOICES,
'DETAILED_DIET_QUESTION_61': 'In an average week, how often do you eat at least 2 servings of whole grains in a day?',
'DETAILED_DIET_QUESTION_61_CHOICES': _FREQUENCY_WEEK_CHOICES,
'DETAILED_DIET_QUESTION_62': 'In an average week, how often do you consume at least 2-3 servings of fruit in a day?',
'DETAILED_DIET_QUESTION_62_CHOICES': _FREQUENCY_WEEK_CHOICES,
'DETAILED_DIET_QUESTION_63': 'In an average week, how often do you consume at least 2-3 servings of vegetables, including potatoes, in a day?',
'DETAILED_DIET_QUESTION_63_CHOICES': _FREQUENCY_WEEK_CHOICES,
'DETAILED_DIET_QUESTION_64': 'In an average week, how often do you consume one or more servings of fermented vegetables or plant products in a day?',
'DETAILED_DIET_QUESTION_64_CHOICES': _FREQUENCY_WEEK_CHOICES,
'DETAILED_DIET_QUESTION_65': 'In an average week, how often do you consume at least 2 servings of milk or cheese a day?',
'DETAILED_DIET_QUESTION_65_CHOICES': _FREQUENCY_WEEK_CHOICES,
'DETAILED_DIET_QUESTION_66': 'In an average week, how often do you consume milk substitutes (soy milk, lactose free milk, almond milk, etc.)?',
'DETAILED_DIET_QUESTION_66_CHOICES': _FREQUENCY_WEEK_CHOICES,
'DETAILED_DIET_QUESTION_67': 'How often do you eat frozen desserts (ice cream/gelato/milkshakes, sherbet/sorbet, frozen yogurt, etc.)?',
'DETAILED_DIET_QUESTION_67_CHOICES': _FREQUENCY_WEEK_CHOICES,
'DETAILED_DIET_QUESTION_68': 'In an average week, how often do you eat red meat?',
'DETAILED_DIET_QUESTION_68_CHOICES': _FREQUENCY_WEEK_CHOICES,
'DETAILED_DIET_QUESTION_69': 'In an average week, how often do you consume higher fat red meats like prime rib, T-bone steak, hamburger, ribs, bacon, etc.?',
'DETAILED_DIET_QUESTION_69_CHOICES': _FREQUENCY_WEEK_CHOICES,
'DETAILED_DIET_QUESTION_70': 'How many days in an average week do you consume chicken or turkey at least once a day?',
'DETAILED_DIET_QUESTION_70_CHOICES': _FREQUENCY_WEEK_CHOICES,
'DETAILED_DIET_QUESTION_71': 'How many days in an average week do you consume seafood (fish, shrimp, lobster, crab, etc.)?',
'DETAILED_DIET_QUESTION_71_CHOICES': _FREQUENCY_WEEK_CHOICES,
'DETAILED_DIET_QUESTION_72': 'How many days in an average week do you consume salted snacks (potato chips, nacho chips, corn chips, popcorn with butter, French fries etc.)?',
'DETAILED_DIET_QUESTION_72_CHOICES': _FREQUENCY_WEEK_CHOICES,
'DETAILED_DIET_QUESTION_73': 'How many days in an average week do you consume sugary sweets (cake, cookies, pastries, donuts, muffins, chocolate, etc.) at least once a day?',
'DETAILED_DIET_QUESTION_73_CHOICES': _FREQUENCY_WEEK_CHOICES,
'DETAILED_DIET_QUESTION_74': 'Cook with olive oil?',
'DETAILED_DIET_QUESTION_74_CHOICES': _FREQUENCY_WEEK_CHOICES,
'DETAILED_DIET_QUESTION_75': 'Consume whole eggs (does not include egg beaters or just egg whites).',
'DETAILED_DIET_QUESTION_75_CHOICES': _FREQUENCY_WEEK_CHOICES,
'DETAILED_DIET_QUESTION_76': 'Drink 16 ounces or more of sugar sweetened beverages such as non-diet soda or fruit drink/punch (however, not including 100 % fruit juice) in a day?',
'DETAILED_DIET_QUESTION_76_CHOICES': _FREQUENCY_WEEK_CHOICES,
'DETAILED_DIET_QUESTION_77': 'Consume at least 1L (~32 ounces) of water in a day?',
'DETAILED_DIET_QUESTION_77_CHOICES': _FREQUENCY_WEEK_CHOICES,
# Supplemental questions
'SUPPLEMENTAL_DIET': 'Dietary Supplements/Dietary Restrictions:',
'SUPPLEMENTAL_RICE': 'Race/ethnicity:',
'SUPPLEMENTAL_MEDICATION': 'Over the counter and prescription medication:',
'SUPPLEMENTAL_TRAVEL': 'Travel:',
'SUPPLEMENTAL_RELATIONSHIP': 'What is your relationship to other people in this study who have voluntarily told you of their participation (e.g. partner, children, roommates)? For children, please specify whether or not you are genetically related. Note that we will only use information that both parties provide.',
'SUPPLEMENTAL_PETS': 'Pets',
'SUPPLEMENTAL_PETS_INOUTDOOR': 'Indoor/outdoor or confined (cage/tank):',
'SUPPLEMENTAL_PETS_CONTACT': 'Contact extent:',
'SUPPLEMENTAL_ANTIBIOTICS': 'Antibiotic/s:',
'SUPPLEMENTAL_ANTIBIOTICS_NAME': 'Name:',
'SUPPLEMENTAL_ANTIBIOTICS_TREATMENT': 'Treatment for:',
'SUPPLEMENTAL_PREGNANCY': 'Pregnancy due date:',
'SUPPLEMENTAL_OTHER_CONDITIONS': 'Other conditions you suffer from that were not listed in the diagnosed conditions question',
'SUPPLEMENTAL_OPEN_COMMENT': 'Please write anything else about yourself that you think could affect your personal microorganisms.'
}
_SURVEY_MAIN = {
'TITLE': 'Survey',
'ONCE_YOU_START': 'Once you start this survey, you must complete it. Your answers will <strong>not</strong> be saved unless you complete the entire survey.',
'TIME_COMMITMENT': 'We anticipate that completing the questionnaire online will take no more than <strong>45 minutes</strong>.',
'TYPES_OF_QUESTIONS': 'You will be asked questions about your general personal information (name, age, sex, height, weight, ethnicity, place of birth, and current ZIP or equivalent code). We will ask if you recently moved and where you moved from. We will ask questions about general diet information (including whether you follow a special diet, if you have food allergies, whether you have cultural or religious food restrictions). Other questions address whether you have pets and the type of contact you have with these pets and your relationship to other people in this study. There is a section on health information, including a history of allergies/asthma, whether you suffer from migraines, and whether you have a history of irritable bowel disease.',
'YOU_MAY_DECLINE': 'You may decline to answer any question by not selecting an answer.',
'OTHER_SURVEYS': 'Following the questionnaire, you will be presented with a few other focused surveys. As with everything, those surveys are optional but your responses could help improve our understanding of the microbiome.'
}
# Actual text locale
text_locale = {
'404.html': _404,
'FAQ.html': _FAQ,
'new_participant_overview.html': _NEW_PARTICIPANT_OVERVIEW,
'addendum.html': _ADDENDUM,
'portal.html': _PORTAL,
'db_error.html': _DB_ERROR,
'retrieve_kitid.html': _RETREIVE_KITID,
'add_sample.html': _ADD_SAMPLE,
'error.html': _ERROR,
'forgot_password.html': _FORGOT_PASSWORD,
'help_request.html': _HELP_REQUEST,
'new_participant.html': _NEW_PARTICIPANT,
'international.html': _INTERNATIONAL,
'add_sample_overview.html': _ADD_SAMPLE_OVERIVIEW,
'participant_overview.html': _PARTICIPANT_OVERVIEW,
'sample_overview.html': _SAMPLE_OVERVIEW,
'taxa_summary.html': _TAXA_SUMMARY,
'map.html': _MAP,
'human_survey.html': _HUMAN_SURVEY,
'human_survey_completed.html': _HUMAN_SURVEY_COMPLETED,
'register_user.html': _REGISTER_USER,
'chage_pass_verify.html': _CHANGE_PASS_VERIFY,
'survey_main.html': _SURVEY_MAIN,
'animal_survey.html': _ANIMAL_SURVEY,
'handlers': _HANDLERS
}
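# Illustrative sketch only (not part of the original module): how a page handler
# might look up and format one of the localized strings above. The function name
# `example_greeting` is a hypothetical assumption; 'portal.html' and 'GREETING'
# are real keys defined above.
def example_greeting(user_name):
    # Pick the per-template dictionary, then fill in the %(user_name)s placeholder.
    portal_text = text_locale['portal.html']
    return portal_text['GREETING'] % {'user_name': user_name}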
| bsd-3-clause |
shangwuhencc/scikit-learn | sklearn/linear_model/tests/test_randomized_l1.py | 214 | 4690 | # Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# test that the feature score of the best features
F, _ = f_regression(X, y)
def test_lasso_stability_path():
# Check lasso stability path
# Load diabetes data and add noisy features
scaling = 0.3
coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
random_state=42,
n_resampling=30)
assert_array_equal(np.argsort(F)[-3:],
np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
# Check randomized lasso
scaling = 0.3
selection_threshold = 0.5
# or with 1 alpha
clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# or with many alphas
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
X_r = clf.transform(X)
X_full = clf.inverse_transform(X_r)
assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
assert_equal(X_full.shape, X.shape)
clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
scaling=scaling)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(feature_scores, X.shape[1] * [1.])
clf = RandomizedLasso(verbose=False, scaling=-0.1)
assert_raises(ValueError, clf.fit, X, y)
clf = RandomizedLasso(verbose=False, scaling=1.1)
assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
# Check randomized sparse logistic regression
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
X_orig = X.copy()
feature_scores = clf.fit(X, y).scores_
assert_array_equal(X, X_orig) # fit does not modify X
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
random_state=42, scaling=scaling,
n_resampling=50, tol=1e-3)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
# Check randomized sparse logistic regression on sparse data
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# center here because sparse matrices are usually not centered
X, y, _, _, _ = center_data(X, y, True, True)
X_sp = sparse.csr_matrix(X)
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores = clf.fit(X, y).scores_
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores_sp = clf.fit(X_sp, y).scores_
assert_array_equal(feature_scores, feature_scores_sp)
| bsd-3-clause |
ibayer/fastFM-fork | fastFM/tests/test_datasets.py | 2 | 1072 | # Author: Immanuel Bayer
# License: BSD 3 clause
from fastFM.datasets import make_user_item_regression
from sklearn.metrics import mean_squared_error
import scipy.sparse as sp
def test_make_user_item_regression():
from fastFM.mcmc import FMRegression
X, y, coef = make_user_item_regression(label_stdev=0)
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, random_state=42)
fm = FMRegression(rank=2)
y_pred = fm.fit_predict(sp.csc_matrix(X_train), y_train, sp.csc_matrix(X_test))
# generate data with noisy lables
X, y, coef = make_user_item_regression(label_stdev=2)
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, random_state=42)
fm = FMRegression(rank=2)
y_pred_noise = fm.fit_predict(sp.csc_matrix(X_train), y_train, sp.csc_matrix(X_test))
assert mean_squared_error(y_pred_noise, y_test) > \
mean_squared_error(y_pred, y_test)
| bsd-3-clause |
Fornost461/drafts-and-stuff | Python/finance/investissements/recurring_investment.py | 1 | 5227 | #!/usr/bin/env python3
from ExampleForACertainPeriod import ExampleForACertainPeriod, GCD
def divisors_of(n):
assert n >= 1
return [d for d in range(1, n + 1) if n % d == 0]
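# Example (illustrative, not part of the original script):
# >>> divisors_of(12)
# [1, 2, 3, 4, 6, 12]
# The script uses this below to pick period sizes that divide the total duration evenly.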
# Parameters
# account_type = None
# account_type = 'PEA'
account_type = 'AV'
saved_per_month = 25 # currency amount
# saved_per_month = 0 # currency amount
duration_in_months = 12*8
# duration_in_months = 12
opening_balance = 2600 # currency amount
# opening_balance = 100 # currency amount
# I think in this simulator, you should stick to a single product in the portfolio.
# Indeed, without rebalancing, using several products in the portfolio is likely to lead to an unbalanced portfolio.
# percentages
portfolio_wanted_percentages = [100]
portfolio_return_rates_per_year = [15.31]
# portfolio_wanted_percentages = [90, 10]
# portfolio_return_rates_per_year = [10, 3]
# period_sizes_to_try = [0.5] + list(range(1, 25)) # in months
# period_sizes_to_try = [x for x in (0.25 * i for i in range(1, 12)) if duration_in_months % x == 0] # in months
# period_sizes_to_try = [12] # in months
period_sizes_to_try = [0.25, 0.5] + divisors_of(duration_in_months) # in months
# period_sizes_to_try = divisors_of(duration_in_months) # in months
# This simulator seems too unrealistic for the rebalancing simulation to be useful.
rebalance_periods_to_try = [None] # do not simulate rebalancing
# rebalance_periods_to_try = list(range(1, 14)) # in months
# rebalance_periods_to_try = list(range(1, 14)) + [15, 18, 24, 36, 72, 100] # in months
# rebalance_periods_to_try = [1, 3, 6, 12] # in months
tick = None
# show_plot = False
show_plot = True
x_axis = period_sizes_to_try
# x_axis = rebalance_periods_to_try
show_many_examples = True
# show_many_examples = False
fees_rate_per_year_PEA = 0 # percentage for PEA
# fee_by_contribution_PEA = 1 # currency amount for PEA
fee_by_contribution_PEA = 0 # currency amount for PEA
# withdrawal_fees_PEA = 6 # currency amount
withdrawal_fees_PEA = 0 # currency amount
# withdrawal_fees_PEA = 6+4*4*duration_in_months/12*fee_by_contribution_PEA # currency amount
fees_rate_per_year_AV = 0.6 # percentage for AV
# fee_by_contribution_AV = 0 # currency amount for AV
fee_by_contribution_AV = '0.1 %' # currency amount for AV TODO
# withdrawal_fees_AV = 6 # currency amount
withdrawal_fees_AV = 0 # currency amount
if not account_type:
fees_rate_per_year = 0 # percentage
fee_by_contribution = 0.99 # currency amount
withdrawal_fees = 6 # currency amount
elif account_type == 'PEA':
fees_rate_per_year = fees_rate_per_year_PEA
fee_by_contribution = fee_by_contribution_PEA
withdrawal_fees = withdrawal_fees_PEA
else:
assert account_type == 'AV'
fees_rate_per_year = fees_rate_per_year_AV
fee_by_contribution = fee_by_contribution_AV
withdrawal_fees = withdrawal_fees_AV
# Code
print(f'expected total contributions (including fees) {saved_per_month * duration_in_months}\n')
# (normally, amount reached when period size is a divisor of the total duration)
if show_many_examples:
examples = []
for period_size_in_months in period_sizes_to_try:
for rebalance_period_in_months in rebalance_periods_to_try:
examples.append(ExampleForACertainPeriod(saved_per_month, duration_in_months, opening_balance, withdrawal_fees, portfolio_wanted_percentages, portfolio_return_rates_per_year, rebalance_period_in_months, fees_rate_per_year, fee_by_contribution, period_size_in_months, tick, account_type))
print(examples[-1])
# best = max(examples, key = lambda example: example.net)
best = max(examples, key = lambda example: example.net)
print(f'\nbest:\n\n{best}\n\nrepr: {best.__repr__()}')
if show_plot:
import matplotlib.pyplot as plt
plt.plot(x_axis, [x.net for x in examples])
plt.show()
else:
    # chosen_example_for_PEA = ExampleForACertainPeriod(saved_per_month, duration_in_months, opening_balance, withdrawal_fees, portfolio_wanted_percentages, portfolio_return_rates_per_year, rebalance_period_in_months, fees_rate_per_year = fees_rate_per_year_PEA, fee_by_contribution = fee_by_contribution_PEA, period_size_in_months = 3, tick = tick, account_type = 'PEA')
    chosen_example_for_PEA = ExampleForACertainPeriod(saved_per_month = 25, duration_in_months = 96, opening_balance = 2600, withdrawal_fees = 0, portfolio_wanted_percentages = [100], portfolio_return_rates_per_year = [15.31], rebalance_period_in_months = None, fees_rate_per_year = 0, fee_by_contribution = 0, period_size_in_months = 0.25, tick = 0.25, account_type = 'PEA')
    # rebalance_period_in_months is only defined inside the loop of the other branch,
    # so pass None explicitly here (rebalancing is not simulated in this branch).
    chosen_example_for_AV = ExampleForACertainPeriod(saved_per_month, duration_in_months, opening_balance, withdrawal_fees, portfolio_wanted_percentages, portfolio_return_rates_per_year, None, fees_rate_per_year = fees_rate_per_year_AV, fee_by_contribution = fee_by_contribution_AV, period_size_in_months = 0.25, tick = tick, account_type = 'AV')
    print(chosen_example_for_PEA)
    print(chosen_example_for_AV)
| cc0-1.0 |
treycausey/scikit-learn | sklearn/linear_model/tests/test_sparse_coordinate_descent.py | 1 | 10010 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
""" Check that the sparse_coef propery works """
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert_true(sp.isspmatrix(clf.sparse_coef_))
assert_equal(clf.sparse_coef_.todense().tolist()[0], clf.coef_)
def test_normalize_option():
""" Check that the normalize option in enet works """
X = sp.csc_matrix([[-1], [0], [1]])
y = [-1, 0, 1]
clf_dense = ElasticNet(fit_intercept=True, normalize=True)
clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
clf_dense.fit(X, y)
X = sp.csc_matrix(X)
clf_sparse.fit(X, y)
assert_almost_equal(clf_dense.dual_gap_, 0)
assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
"""Check that the sparse lasso can handle zero data without crashing"""
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
"""Test ElasticNet for various values of alpha and l1_ratio with list X"""
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
# catch warning about alpha=0.
# this is discouraged but should work.
ignore_warnings(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
"""Test ElasticNet for various values of alpha and l1_ratio with sparse
X"""
f = ignore_warnings
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
# this should be the same as lasso
clf = ElasticNet(alpha=0, l1_ratio=1.0)
f(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
positive=False, n_targets=1):
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0 # only the top features are impacting the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative,
positive=positive)
X_train, X_test = X[n_samples / 2:], X[:n_samples / 2]
y_train, y_test = y[n_samples / 2:], y[:n_samples / 2]
s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
d_clf.fit(X_train.todense(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples / 2:], X[:n_samples / 2]
y_train, y_test = y[n_samples / 2:], y[:n_samples / 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.todense(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
# check that the coefs are sparse
assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
# XXX: There is a bug when precompute is not None!
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_,
estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, fit_intercept=False)
ignore_warnings(clf.fit)(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(n_alphas, clf.n_alphas)
assert_equal(n_alphas, len(clf.alphas_))
sparse_mse_path = clf.mse_path_
ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data
assert_almost_equal(clf.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
X, y = make_sparse_data(n_samples=50, n_features=10)
for normalize in [True, False]:
clfs = ElasticNetCV(max_iter=100, cv=10, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = ElasticNetCV(max_iter=100, cv=10, normalize=normalize)
ignore_warnings(clfd.fit)(X.todense(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
clfs = LassoCV(max_iter=100, cv=10, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = LassoCV(max_iter=100, cv=10, normalize=normalize)
ignore_warnings(clfd.fit)(X.todense(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
| bsd-3-clause |
kastnerkyle/COCORA2012 | gui.py | 1 | 17016 | #!/usr/bin/python
import sys
from PyQt4 import QtGui as qtg
from PyQt4 import QtCore as qtc
from numpy import arange, sin, pi
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.patches import Rectangle
from matplotlib.ticker import FuncFormatter
import ExampleAlg
import numpy as np
import collections
import types
FPATH = "_05.wav"
class MplCanvas(FigureCanvas):
"""Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.)."""
def __init__(self, parent=None, width=5, height=4, dpi=100):
self.fig = Figure(figsize=(width, height), dpi=dpi, facecolor='w')
self.axes = self.fig.add_subplot(1,1,1)
#Equivalent to hold(off) in MATLAB, i.e. each plot is fresh
#without showing old data
self.axes.hold(False)
#Plot color order. For more information
#see http://matplotlib.sourceforge.net/api/axes_api.html#matplotlib.axes.Axes.plot
self.colors = ['b', 'r', 'g', 'c', 'm']
#Zoom box color information
self.zoom_color = 'y'
#State variables must be here in order to retain state between
#plot calls
self.alg = ExampleAlg.ExampleAlg(FPATH)
self.zoom = {"x":[],
"y":[]}
#State flag to see if zooming mode is active. Set in the left_pressed
#when the event for left_held is connected, then released when
#left_released is called
self.zooming = None
#Zoom_box holds the x and y values for current zoom box when
#self.zooming == True
self.zoom_box = {"x":{},
"y":{}}
self.zoom_box["x"] = {"data_coords":[],
"axes_coords":[]}
self.zoom_box["y"] = {"data_coords":[],
"axes_coords":[]}
#State storage for the current cursor position in data coordinates
self.cursor_data = {}
self.cursor_data["x"] = 0
self.cursor_data["y"] = 0
#Setting to hold number of channels coming from algorithm
self.num_chans = 0
        #Array which will hold T/F values for which channels to display
self.display_chans = []
#Maximum zoom is 0, x_max and 0, y_max for the x and y axes
self.x_max = 0
self.y_max = 0
FigureCanvas.__init__(self, self.fig)
self.setParent(parent)
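# Illustration added alongside this file: the zoom logic further down mixes
# matplotlib "data" coordinates (plot units) with "axes" coordinates (0..1
# across the axes box). This is a minimal sketch of that conversion, assuming
# only a matplotlib Axes instance; the helper name is ours and the GUI classes
# below do not use it.
def _data_to_axes_coords(axes, x, y):
    """Convert a point from data coordinates to axes (0..1) coordinates."""
    display_xy = axes.transData.transform((x, y))
    return axes.transAxes.inverted().transform(display_xy)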
class DynamicMplCanvas(MplCanvas):
""" A canvas that updates itself every X seconds with a new plot. """
def __init__(self, *args, **kwargs):
#Initialize parent
MplCanvas.__init__(self, *args, **kwargs)
#Set initial plot and initial states
self.compute_initial_figure()
#Create dynamic canvas and start plotting, set timer for graph updates
timer = qtc.QTimer(self)
qtc.QObject.connect(timer,qtc.SIGNAL("timeout()"),self.update_figure)
X = 750 #in milliseconds
timer.start(X)
def draw_figure(self, data):
""" Handles all the drawing code that is shared by the initial plotting
and the dynamic plotting. """
#Link channels in order with the colors list presented by self.colors.
#Note that if data is shorter than colors list, the end channels will
#"disappear"
#TODO: Add skip list to silence channels during runtime
display = self.display_chans
colors = self.colors
args = []
for tg, ch, col in zip(display, data, colors):
if tg == True:
args.append(ch)
args.append(col)
self.axes.plot(*args)
#xs and ys hold the state values for what we want the zoom to be
self.axes.set_xlim(self.zoom["x"][0], self.zoom["x"][1])
self.axes.set_ylim(self.zoom["y"][0], self.zoom["y"][1])
#Display X axes in units of frequency, but we want to leave all the state storage and algorithmic stuff in bin units
#self.axes.xaxis.set_major_formatter(FuncFormatter(lambda x, pos: x*float(self.alg.framerate)/self.alg.fftlen))
#Draw lines for zooming rectangle, with one axis being in data coords
#and the other being in axes coords - see
#http://matplotlib.sourceforge.net/api/pyplot_api.html#matplotlib.pyplot.axhspan
if self.zooming != None:
try:
self.axes.axhspan(self.zoom_box["y"]["data_coords"][0],
self.zoom_box["y"]["data_coords"][1],
self.zoom_box["x"]["axes_coords"][0],
self.zoom_box["x"]["axes_coords"][1],
color=self.zoom_color,
alpha=.5)
self.axes.axvspan(self.zoom_box["x"]["data_coords"][0],
self.zoom_box["x"]["data_coords"][1],
self.zoom_box["y"]["axes_coords"][0],
self.zoom_box["y"]["axes_coords"][1],
color=self.zoom_color,
alpha=.5)
except IndexError:
#Ignore indexing exceptions - sometimes zoom_box has not been
#filled when plot is called
pass
#Create text in the bottom left that show the data coordinates which the
#mouse is currently hovering over
x = "%s" % float("%.2f" % self.cursor_data["x"])
y = "%s" % float("%.2f" % self.cursor_data["y"])
self.axes.text(-.1, -.1, "x="+x+" y="+y, transform = self.axes.transAxes)
self.draw()
def compute_initial_figure(self):
"""Initialize figure and set maximum X and maximum Y"""
#Get first result from algorithm
self.alg.start()
res = self.alg.run()
#Get number of chans in order to set up toggle boxes
self.num_chans = len(res)
self.display_chans = [False for i in range(self.num_chans)]
#Find maximum value of all channels, excluding DC term ([1:])
max_max = max(map(lambda x: max(x[1:]), res))
#Find length of longest channel
self.x_max = max(map(len, res))
#1.05 is a cushion value so that we can see all of the data at
#farthest zoom out
self.y_max = 1.05*max_max
#Set zoom state to maximum zoom out
self.zoom["x"] = [0, self.x_max]
self.zoom["y"] = [0, self.y_max]
self.axes.set_xlim(self.zoom["x"][0], self.zoom["x"][1])
self.axes.set_ylim(self.zoom["y"][0], self.zoom["y"][1])
self.draw_figure(res)
def update_figure(self):
""" Plot the new data, and set zoom levels to current state values. """
#Get values for next algorithm process
res = self.alg.run()
#Plot new data using configured color scheme
self.draw_figure(res)
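# Illustration (not used by the GUI classes): draw_figure above interleaves data
# arrays and colour format strings into a single plot(*args) call so each
# channel gets its own colour. A standalone sketch of that pattern, assuming an
# iterable of 1-D arrays and a matplotlib Axes; the names are ours.
def _plot_channels_sketch(axes, channels, colors=('b', 'r', 'g', 'c', 'm')):
    """Plot each channel with its paired colour using one plot() call."""
    args = []
    for chan, color in zip(channels, colors):
        args.append(chan)
        args.append(color)
    axes.plot(*args)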
class AlgGui(qtg.QWidget):
""" Main GUI class, defines mouse and keyboard control functionality. """
#To see a tutorial on using the transforms...
#http://matplotlib.sourceforge.net/users/transforms_tutorial.html
def __init__(self):
qtg.QWidget.__init__(self)
self.graph = DynamicMplCanvas(self, width=10, height=10, dpi=100)
#Storage for click coordinates during click state
self.coords = {"x":[],
"y":[]}
self.initUI()
def genEditFunction(self, key, le, mn, mx):
""" Generator function for making a specific textChanged function
in order to connect to a QLineEdit box. Only works for integer
inputs to QLineEdit box. """
def textChanged(string):
#Check that le is between mn and mx
pos = 0
v = qtg.QIntValidator(mn, mx, le)
le.setValidator(v)
#Bounds checking
if v.validate(string, pos) == qtg.QValidator.Invalid:
value = self.graph.alg.adjustable_params[key]["current_value"]
le.setText(str(value))
print("Input of " + str(string) + " is outside range " + str(mn) + "," + str(mx))
else:
try:
self.graph.alg.adjustable_params[key]["current_value"] = int(string)
except ValueError:
#Do this to suppress printing of error when line is blank
pass
return textChanged
def genIdleFunction(self, key, le):
""" Generator for a super simple test of box contents. """
def editingFinished():
if len(le.text()) < 1:
                #fall back to the parameter's minimum when the box is left blank
                value = self.graph.alg.adjustable_params[key]["min"]
                le.setText(str(value))
return editingFinished
def genSliderFunction(self, key, le, mn, mx):
""" Generator function for making the value changed function for a particular slider """
def valueChanged(value):
res = value*mx/100 if value*mx/100 > mn else mn
le.setText(str(res))
self.graph.alg.adjustable_params[key]["current_value"] = res
return valueChanged
def addSliders(self, widgets):
""" Function to add arbitrary number of sliders to the display """
for key in self.graph.alg.adjustable_params.keys():
#Add a label to the widgets dict
widgets[str(key) + "_label"] = qtg.QLabel(str(key))
#Get data extents for bounds checking
mn = self.graph.alg.adjustable_params[key]["min"]
mx = self.graph.alg.adjustable_params[key]["max"]
#Create a line edit widget and connect it to the generated
#textChanged function from the genEditFunction
le = qtg.QLineEdit(self)
edit = self.genEditFunction(key, le, mn, mx)
le.textChanged.connect(edit)
#Set text to min value if editing finishes as blank...
#Currently bugged in Ubuntu 11.10
fin = self.genIdleFunction(key, le)
le.editingFinished.connect(fin)
#Set text to default value
value = self.graph.alg.adjustable_params[key]["current_value"]
le.setText(str(value))
widgets[str(key) + "_current_value"] = le
#Create a slider, connect it to the generated sliderFunction,
#and add it to the widgets dict
sld = qtg.QSlider(qtc.Qt.Horizontal, self)
fn = self.genSliderFunction(key, le, mn, mx)
sld.valueChanged.connect(fn)
widgets[str(key) + "_slider"] = sld
#Add an empty space, so that widgets are better grouped visually
widgets[str(key) + "_spacer"] = qtg.QLabel(" ")
def boundsCheck(self, xdata, ydata):
"""Make sure that zoom boundaries are within data window"""
xdata = self.graph.zoom["x"][0] if xdata < self.graph.zoom["x"][0] else xdata
xdata = self.graph.zoom["x"][1] if xdata > self.graph.zoom["x"][1] else xdata
ydata = self.graph.zoom["y"][0] if ydata < self.graph.zoom["y"][0] else ydata
ydata = self.graph.zoom["y"][1] if ydata > self.graph.zoom["y"][1] else ydata
return (xdata, ydata)
def left_pressed(self, event):
"""Record location where the left click started"""
#Use the transform so we enable the ability to click outside axes,
#as event.xdata = None if event.inaxes == False
#Also make sure not to zoom outside data bounds
if event.button == 1:
xdata, ydata = self.graph.axes.transData.inverted().transform((event.x, event.y))
xdata, ydata = self.boundsCheck(xdata, ydata)
#Add location data to self.coords for storage
self.coords["x"].append(xdata)
self.coords["y"].append(ydata)
#Set the zooming state so it is no longer None
self.graph.zooming = self.graph.mpl_connect("motion_notify_event", self.left_held)
def left_held(self, event):
"""Method for use during zoom event"""
#Get x and y coordinates from data coords where left click started
x_temp, y_temp = self.graph.axes.transData.transform((self.coords["x"][0], self.coords["y"][0]))
#Get x and y data points for where the current event is
x0, y0 = self.graph.axes.transData.inverted().transform((event.x, event.y))
#Save off data coords
self.graph.zoom_box["x"]["data_coords"] = sorted([self.coords["x"][0], x0])
self.graph.zoom_box["y"]["data_coords"] = sorted([self.coords["y"][0], y0])
#Get axes coordinates for where left click started
x1, y1 = self.graph.axes.transAxes.inverted().transform((x_temp, y_temp))
#Get current coordinates of cursor
x2, y2 = self.graph.axes.transAxes.inverted().transform((event.x, event.y))
        #Make sure the box coordinates are ordered (left, right) and (lower, upper)
self.graph.zoom_box["x"]["axes_coords"] = sorted([x1, x2])
self.graph.zoom_box["y"]["axes_coords"] = sorted([y1, y2])
def left_released(self, event):
"""Record location of click release, then update axes state"""
if event.button == 1:
#Get data coordinate for event. Use this method because event.x and
#event.y return None when event.inaxes == None
xdata, ydata = self.graph.axes.transData.inverted().transform((event.x, event.y))
xdata, ydata = self.boundsCheck(xdata, ydata)
#Append release coordinates to the stored value for where left click
#started.
self.coords["x"].append(xdata)
self.coords["y"].append(ydata)
x_list = self.coords["x"]
y_list = self.coords["y"]
#xs and ys hold the zoom state of the plot, so update those
#TODO: Check that zoom box covers some portion inside the graph
self.graph.zoom["x"] = sorted(x_list)
self.graph.zoom["y"] = sorted(y_list)
#Disconnect event and return zooming flag to None state
self.graph.mpl_disconnect(self.graph.zooming)
self.graph.zooming = None
#Empty out coords, left click is no longer pressed
self.coords["x"] = []
self.coords["y"] = []
def right_pressed(self, event):
"""Zoom out to initial zoom level"""
if event.button == 3:
#Zoom to initial state
self.graph.zoom["x"] = [0, self.graph.x_max]
self.graph.zoom["y"] = [0, self.graph.y_max]
def display_cursor_point(self, event):
"""Show the data coordinate where the mouse cursor is hovering"""
if event.inaxes != None:
self.graph.cursor_data["x"] = event.xdata
self.graph.cursor_data["y"] = event.ydata
def genCheckboxFunction(self, num):
"""Generator for a channel toggle checkboxes. """
def toggleChannel():
self.graph.display_chans[num] = not self.graph.display_chans[num]
return toggleChannel
def addCheckboxes(self, widgets):
"""Add textboxes to passed in collection."""
for i in range(self.graph.num_chans):
cb = qtg.QCheckBox()
widgets['chan_'+str(i)+'checkbox'] = cb
fn = self.genCheckboxFunction(i)
cb.stateChanged.connect(fn)
def initLayout(self):
hbox = qtg.QHBoxLayout()
#Click and drag zooming functions
self.zoom_start = self.graph.mpl_connect("button_press_event", self.left_pressed)
self.zoom_end = self.graph.mpl_connect("button_release_event", self.left_released)
#Undo zoom functions
self.unzoom = self.graph.mpl_connect("button_press_event", self.right_pressed)
#Cursor positional display
self.cursor_pos = self.graph.mpl_connect("motion_notify_event", self.display_cursor_point)
#Plot graphic
hbox.addWidget(self.graph)
vbox = qtg.QVBoxLayout()
hbox.addStretch(1)
hbox.addLayout(vbox)
#Top right widgets, pass in widgets dict so sliders can be added
widgets = collections.OrderedDict()
self.addSliders(widgets)
[vbox.addWidget(x) for x in widgets.values()]
vbox.addStretch(1)
        #Bottom right widgets, pass in checkbox_widgets so checkboxes can be added
vbox.addWidget(qtg.QLabel("Enable Channels 1 - "+str(self.graph.num_chans)))
hbox_check = qtg.QHBoxLayout()
checkbox_widgets = collections.OrderedDict()
self.addCheckboxes(checkbox_widgets)
[hbox_check.addWidget(x) for x in checkbox_widgets.values()]
vbox.addLayout(hbox_check)
self.setLayout(hbox)
def initUI(self):
#Set window title to the name of the included algorithm
self.setWindowTitle(self.graph.alg.__class__.__name__)
self.initLayout()
self.show()
if __name__ == "__main__":
app = qtg.QApplication(sys.argv)
g = AlgGui()
sys.exit(app.exec_())
| bsd-3-clause |
expectocode/telegram-analysis | activityovertime.py | 2 | 5928 | #!/usr/bin/env python3
"""
A program to plot the activity in a chat over time
"""
import argparse
from json import loads
from datetime import date,timedelta,datetime
from os import path
from collections import defaultdict
import matplotlib.pyplot as plt
from sys import maxsize
def extract_date_and_len(event):
text_date = date.fromtimestamp(event['date'])
text_length = len(event['text'])
return text_date, text_length
def make_ddict_in_range(json_file,binsize,start,end):
"""
return a defaultdict(int) of dates with activity on those dates in a date range
"""
events = (loads(line) for line in json_file)
#generator, so whole file is not put in mem
dates_and_lengths = (extract_date_and_len(event) for event in events if 'text' in event)
dates_and_lengths = ((date,length) for (date,length) in dates_and_lengths if date >= start and date <= end)
counter = defaultdict(int)
#a dict with dates as keys and frequency as values
if binsize > 1:
        #bin sizes that are not > 1 are handled by the else branch, i.e. treated as 1
curbin = 0
for date_text,length in dates_and_lengths:
if curbin == 0 or (curbin - date_text) > timedelta(days=binsize):
curbin = date_text
counter[curbin] += length
else:
for date_text,length in dates_and_lengths:
counter[date_text] += length
return counter
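# Added sketch: the binning above walks the dates in file order and starts a new
# bin once the current date is more than `binsize` days from the bin start. An
# equivalent way to bin, shown for clarity only, maps every date onto the start
# of a fixed-width bin measured from a chosen origin date. This helper is
# illustrative and the script never calls it.
def make_ddict_fixed_bins(dates_and_lengths, binsize, origin):
    """Bin (date, length) pairs into fixed-width bins of `binsize` days."""
    counter = defaultdict(int)
    width = max(binsize, 1)
    for date_text, length in dates_and_lengths:
        offset = (date_text - origin).days // width
        bin_start = origin + timedelta(days=offset * width)
        counter[bin_start] += length
    return counter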
def parse_args():
parser = argparse.ArgumentParser(
description="Visualise and compare the activity of one or more Telegram chats over time.")
required = parser.add_argument_group('required arguments')
#https://stackoverflow.com/questions/24180527/argparse-required-arguments-listed-under-optional-arguments
required.add_argument(
'-f', '--files',
help='paths to the json file(s) (chat logs) to analyse.',
required = True,
nargs='+'
)
parser.add_argument(
'-o', '--output-folder',
help='the folder to save the activity graph image in.'
'Using this option will make the graph not display on screen.')
parser.add_argument(
'-b', '--bin-size',
help='the number of days to group together as one datapoint. '
'Higher number is more smooth graph, lower number is more spiky. '
'Default 3.',
type=int,default=3)
    #negative bin sizes are treated the same as a bin size of 1
parser.add_argument(
'-s','--figure-size',
help='the size of the figure shown or saved (X and Y size).'
'Choose an appropriate value for your screen size. Default 14 8.',
nargs=2,type=int,default=[14,8]
)
parser.add_argument(
'-d','--date-range',
help='the range of dates you want to look at data between. '
'Must be in format YYYY-MM-DD YYYY-MM-DD with the first date '
'the start of the range, and the second the end. Example: '
"-d '2017-11-20 2017-05-15'. Make sure you don't put a day "
'that is too high for the month eg 30th February.',
default="1000-01-01 4017-01-01"
#hopefully no chatlogs contain these dates :p
)
return parser.parse_args()
def save_figure(folder,filenames):
chats_string = '_'.join(filenames)
if len(chats_string) > 200:
#file name likely to be so long as to cause issues
figname = input(
"This graph is going to have a very long file name. Please enter a custom name(no need to add an extension): ")
else:
figname = "Activity in {}".format(chats_string)
plt.savefig("{}/{}.png".format(folder, figname))
def annotate_figure(filenames,binsize):
if len(filenames) > 1:
plt.title("Activity in {}".format(filenames))
plt.legend(filenames, loc='best')
else:
plt.title("Activity in {}".format(filenames[0]))
if binsize > 1:
plt.ylabel("Activity level (chars per {} days)".format(binsize), size=14)
else:
plt.ylabel("Activity level (chars per day)", size=14)
def get_dates(arg_dates):
if " " not in arg_dates:
print("You must put a space between start and end dates")
exit()
daterange = arg_dates.split()
start_date = datetime.strptime(daterange[0], "%Y-%m-%d").date()
end_date = datetime.strptime(daterange[1], "%Y-%m-%d").date()
return (start_date,end_date)
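# Added variant (illustrative only): get_dates above does not check that the
# start date actually precedes the end date. A hedged sketch of that validation,
# using only the stdlib; the function name is ours and main() does not call it.
def get_dates_checked(arg_dates):
    start_date, end_date = get_dates(arg_dates)
    if start_date > end_date:
        print("Start date {} is after end date {}".format(start_date, end_date))
        exit()
    return (start_date, end_date)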
def main():
"""
main function
"""
args = parse_args()
#set up args
filepaths = args.files
savefolder = args.output_folder
binsize = args.bin_size
figure_size = args.figure_size
start_date,end_date = get_dates(args.date_range)
filenames = []
plt.figure(figsize=figure_size)
for ind,filepath in enumerate(filepaths):
with open(filepath, 'r') as jsonfile:
#if args.date_range is not None:
# chat_counter = make_ddict_in_date_range(
# jsonfile,binsize,start_date,end_date)
#else:
# chat_counter = make_ddict(jsonfile,binsize)
chat_counter = make_ddict_in_range(
jsonfile,binsize,start_date,end_date)
filenames.append(path.splitext(path.split(filepath)[-1])[0])
#make filename just the name of the file,
# with no leading directories and no extension
chat_activity = sorted(chat_counter.items())
#find frequency of chat events per date
plt.plot(*zip(*chat_activity))
plt.grid()
#because i think it looks better with the grid
annotate_figure(filenames,binsize)
if savefolder is not None:
#if there is a given folder to save the figure in, save it there
save_figure(savefolder,filenames)
else:
#if a save folder was not specified, just open a window to display graph
plt.show()
if __name__ == "__main__":
main()
| mit |
liesbethvanherpe/NeuroM | examples/end_to_end_distance.py | 5 | 4395 | #!/usr/bin/env python
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Calculate and plot end-to-end distance of neurites.'''
import neurom as nm
from neurom import morphmath
import numpy as np
import matplotlib.pyplot as plt
def path_end_to_end_distance(neurite):
'''Calculate and return end-to-end-distance of a given neurite.'''
trunk = neurite.root_node.points[0]
return max(morphmath.point_dist(l.points[-1], trunk)
for l in neurite.root_node.ileaf())
def mean_end_to_end_dist(neurites):
'''Calculate mean end to end distance for set of neurites.'''
return np.mean([path_end_to_end_distance(n) for n in neurites])
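# Illustrative sketch (not part of NeuroM's API): for a single unbranched
# neurite the end-to-end distance reduces to the straight-line distance from
# the first point to the last point. The helper below shows that idea on a
# plain (N, 3)-or-wider array of XYZ points; the name is ours.
def straight_line_end_to_end(points):
    '''Distance from the first point to the last point of a point array.'''
    points = np.asarray(points, dtype=float)
    return np.linalg.norm(points[-1, :3] - points[0, :3])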
def make_end_to_end_distance_plot(nb_segments, end_to_end_distance, neurite_type):
'''Plot end-to-end distance vs number of segments'''
plt.figure()
plt.plot(nb_segments, end_to_end_distance)
plt.title(neurite_type)
plt.xlabel('Number of segments')
plt.ylabel('End-to-end distance')
plt.show()
def calculate_and_plot_end_to_end_distance(neurite):
'''Calculate and plot the end-to-end distance vs the number of segments for
an increasingly larger part of a given neurite.
Note that the plots are not very meaningful for bifurcating trees.'''
def _dist(seg):
        '''Distance between segment end and trunk'''
return morphmath.point_dist(seg[1], neurite.root_node.points[0])
end_to_end_distance = [_dist(s) for s in nm.iter_segments(neurite)]
make_end_to_end_distance_plot(np.arange(len(end_to_end_distance)) + 1,
end_to_end_distance, neurite.type)
if __name__ == '__main__':
# load a neuron from an SWC file
filename = 'test_data/swc/Neuron_3_random_walker_branches.swc'
nrn = nm.load_neuron(filename)
# print mean end-to-end distance per neurite type
print('Mean end-to-end distance for axons: ',
mean_end_to_end_dist(n for n in nrn.neurites if n.type == nm.AXON))
print('Mean end-to-end distance for basal dendrites: ',
mean_end_to_end_dist(n for n in nrn.neurites if n.type == nm.BASAL_DENDRITE))
print('Mean end-to-end distance for apical dendrites: ',
mean_end_to_end_dist(n for n in nrn.neurites
if n.type == nm.APICAL_DENDRITE))
print('End-to-end distance per neurite (nb segments, end-to-end distance, neurite type):')
for nrte in nrn.neurites:
# plot end-to-end distance for increasingly larger parts of neurite
calculate_and_plot_end_to_end_distance(nrte)
# print (number of segments, end-to-end distance, neurite type)
print(sum(len(s.points) - 1 for s in nrte.root_node.ipreorder()),
path_end_to_end_distance(nrte), nrte.type)
| bsd-3-clause |
zacharykirby/REU | ConvLSTM/convLSTM_image.py | 1 | 13494 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 28 21:22:59 2017
Last modified: Wed July 19, 2017
@author: maida, kirby
This is a convolutional LSTM prototype for predictive coding.
It uses a constant image for training.
"""
import os
import sys
#import numpy as np
#import matplotlib.pyplot as plt
#import matplotlib.cm as cm
import tensorflow as tf
from PIL import Image
import numpy
print("Python version :", sys.version)
print("TensorFlow version: ", tf.VERSION)
print("Current directory : ", os.getcwd())
# For logging w/ TensorBoard
# The /tmp directory is periodically cleaned, such as on reboot.
# Since you probably don't want to keep these logs around forever,
# this is a practical place to put them.
LOGDIR = "/tmp/convLSTM/"
IM_SZ_LEN = 64 # For later experiments, increase size as necessary
IM_SZ_WID = 64
BATCH_SZ = 1
NUM_UNROLLINGS = 2 # increase to 3 after debugging
#LEARNING_RATE = 0.1 # long story, may need simulated annealing
NUM_TRAINING_STEPS = 1201
graph = tf.Graph()
with graph.as_default():
file_contents = tf.read_file('image_0004_leafCropped.jpg')
image = tf.image.decode_jpeg(file_contents)
image = tf.image.rgb_to_grayscale(image) # Input to the LSTM !!!
image = tf.image.resize_images(image, [IM_SZ_LEN, IM_SZ_WID])
image = tf.expand_dims(image, 0)
image = (1/255.0) * image # normalize to range 0-1
# Variable (wt) definitions. Only variables can be trained.
# Naming conventions follow *Deep Learning*, Goodfellow et al, 2016.
# input update
with tf.name_scope('Input_Update_Weights'):
U = tf.Variable(tf.truncated_normal([5, 5, 2, 1], -0.1, 0.1), name="U")
W = tf.Variable(tf.truncated_normal([5, 5, 1, 1], -0.1, 0.1), name="W")
B = tf.Variable(tf.ones([1,IM_SZ_LEN, IM_SZ_WID,1]), name="B")
# input gate (g_gate): input, prev output, bias
with tf.name_scope('Input_Gate_Weights'):
Ug = tf.Variable(tf.truncated_normal([5, 5, 2, 1], -0.1, 0.1), name="Ug")
Wg = tf.Variable(tf.truncated_normal([5, 5, 1, 1], -0.1, 0.1), name="Wg")
Bg = tf.Variable(tf.ones([1,IM_SZ_LEN, IM_SZ_WID,1]), name="Bg")
# forget gate (f_gate): input, prev output, bias
with tf.name_scope('Forget_Gate_Weights'):
Uf = tf.Variable(tf.truncated_normal([5, 5, 2, 1], -0.1, 0.1), name="Uf")
Wf = tf.Variable(tf.truncated_normal([5, 5, 1, 1], -0.1, 0.1), name="Wf")
Bf = tf.Variable(tf.ones([1,IM_SZ_LEN, IM_SZ_WID,1]), name="Bf")
# output gate (q_gate): input, prev output, bias
with tf.name_scope('Output_Gate_Weights'):
Uo = tf.Variable(tf.truncated_normal([5, 5, 2, 1], -0.1, 0.1), name="Uo")
Wo = tf.Variable(tf.truncated_normal([5, 5, 1, 1], -0.1, 0.1), name="Wo")
Bo = tf.Variable(tf.ones([1,IM_SZ_LEN, IM_SZ_WID,1]), name="Bo")
def newEmpty4Dtensor_1channel():
"""
Returns a new 4D tensor with shape [1, 64, 64, 1].
All elements are initialized to zero.
"""
emptyTensor = tf.zeros([IM_SZ_LEN, IM_SZ_WID])
emptyTensor = tf.reshape(emptyTensor,[1,IM_SZ_LEN,IM_SZ_WID,1])
return emptyTensor
def newEmpty4Dtensor_2channels():
"""
Returns a new 4D tensor with shape [1, 64, 64, 2].
All elements are initialized to zero.
"""
emptyTensor = tf.zeros([IM_SZ_LEN, IM_SZ_WID, 2])
emptyTensor = tf.expand_dims(emptyTensor, axis=0)
return emptyTensor
# create some initializations
initial_lstm_state = newEmpty4Dtensor_1channel()
initial_lstm_output = newEmpty4Dtensor_1channel()
initial_err_input = newEmpty4Dtensor_2channels()
# The above weights are global to this definition.
def convLstmLayer(err_inp, prev_s, prev_h):
"""
        Build a convLSTM layer w/o peephole connections.
Input args:
err_inp: current input (tensor: [1, 64, 64, 2])
prev_h : previous output (tensor: [1, 64, 64, 1])
prev_s : previous state (tensor: [1, 64, 64, 1])
Returns:
s : current state (tensor: [1, 64, 64, 1])
h : current output (tensor: [1, 64, 64, 1])
"""
with tf.name_scope("LSTM"):
inp = tf.nn.tanh(tf.nn.conv2d(err_inp, U, [1, 1, 1, 1], padding='SAME')
+ tf.nn.conv2d(prev_h, W, [1, 1, 1, 1], padding='SAME')
+ B, name="inp")
g_gate = tf.nn.tanh(tf.nn.conv2d(err_inp, Ug, [1, 1, 1, 1], padding='SAME')
+ tf.nn.conv2d(prev_h, Wg, [1, 1, 1, 1], padding='SAME')
+ Bg, name="g_gate") # i_gate is more common name
f_gate = tf.nn.tanh(tf.nn.conv2d(err_inp, Uf, [1, 1, 1, 1], padding='SAME')
+ tf.nn.conv2d(prev_h, Wf, [1, 1, 1, 1], padding='SAME')
+ Bf, name="f_gate")
q_gate = tf.nn.tanh(tf.nn.conv2d(err_inp, Uo, [1, 1, 1, 1], padding='SAME')
+ tf.nn.conv2d(prev_h, Wo, [1, 1, 1, 1], padding='SAME')
+ Bo, name="q_gate") # o_gate is more common name
s = tf.add(tf.multiply(f_gate, prev_s), tf.multiply(g_gate, inp), name="state")
h = tf.multiply(q_gate, tf.nn.relu6(s), name="output") # Also try relu
return s, h # normally above is tanh
# errorModule doesn't use variables, so doesn't undergo training
def errorModule(image, predict):
"""
Build an error representation for input to the convLSTM layer.
Input args:
image: target image (tensor: [1, 64, 64, 1])
predict: predicted image (tensor: [1, 64, 64, 1])
Returns:
tensor4D: Errs packed in 2 channels. (tensor: [1, 64, 64, 2])
"""
with tf.name_scope("ErrMod"):
err1 = tf.nn.relu(image - predict, name="E1")
err2 = tf.nn.relu(predict - image, name="E2")
tensor5D = tf.stack([err1, err2], axis=3)
tensor4D = tf.reshape(tensor5D, [1, IM_SZ_LEN, IM_SZ_WID, 2], name="PrdErr")
return tensor4D
# Build LSTM
lstm_state = initial_lstm_state
lstm_output = initial_lstm_output
err_input = initial_err_input
with tf.name_scope("full_model"):
        for _ in range(NUM_UNROLLINGS): # unroll the LSTM NUM_UNROLLINGS times
lstm_state, lstm_output = convLstmLayer(err_input, lstm_state, lstm_output)
err_input = errorModule(image, lstm_output)
# "prediction" is always lstm_output
# error_module_output = errorModule(x, lstm_output)
#New optimizer block, uses exp decay on learning rate, added clip_by_global_norm
loss = tf.reduce_sum(err_input) # sums the values across each component of the tensor
global_step = tf.Variable(0)
    #learning rate starts at 10.0 and is multiplied by 0.05 every 300 steps (staircase)
learning_rate = tf.train.exponential_decay(
10.0, global_step, 300, 0.05, staircase=True, name='LearningRate')
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
gradients, v = zip(*optimizer.compute_gradients(loss))
gradients, _ = tf.clip_by_global_norm(gradients,1.25)
optimizer = optimizer.apply_gradients(
zip(gradients,v),global_step=global_step)
with tf.name_scope("initializations"):
tf.summary.image("initial_lstm_state", initial_lstm_state, 3)
tf.summary.image("initial_lstm_output", initial_lstm_output, 3)
tf.summary.image("initial_error1",
tf.slice(initial_err_input, [0,0,0,0], [1, 64, 64, 1]), 3)
tf.summary.image("initial_error2",
tf.slice(initial_err_input, [0,0,0,1], [1, 64, 64, 1]), 3)
with tf.name_scope("input"):
tf.summary.image("image", image, 3)
with tf.name_scope("lstm"):
tf.summary.image("lstm_out", lstm_output, 3)
tf.summary.image("lstm_state", lstm_state, 3)
with tf.name_scope("error"):
tf.summary.image("perror_1",
tf.slice(err_input, [0,0,0,0], [1, 64, 64, 1]), 3)
tf.summary.image("perror_2",
tf.slice(err_input, [0,0,0,1], [1, 64, 64, 1]), 3)
with tf.name_scope('optimizer'):
tf.summary.scalar('loss',loss)
tf.summary.scalar('learning_rate',learning_rate)
with tf.name_scope('weights'):
with tf.name_scope('input_update'):
newU1 = tf.slice(U,[0,0,0,0],[5,5,1,1])
newU2 = tf.slice(U,[0,0,1,0],[5,5,1,1])
newW = tf.slice(W,[0,0,0,0],[5,5,1,1])
newU1 = tf.squeeze(newU1) #now a viewable [5x5] matrix
newU2 = tf.squeeze(newU2)
newW = tf.squeeze(newW)
newU1 = tf.reshape(newU1,[1,5,5,1])
newU2 = tf.reshape(newU2,[1,5,5,1])
newW = tf.reshape(newW,[1,5,5,1])
tf.summary.image('U1', newU1)
tf.summary.image('U2', newU2)
tf.summary.image('W', newW)
tf.summary.image('B', B)
with tf.name_scope('input_gate'):
newUg1 = tf.slice(Ug,[0,0,0,0],[5,5,1,1])
newUg2 = tf.slice(Ug,[0,0,1,0],[5,5,1,1])
newWg = tf.slice(Wg,[0,0,0,0],[5,5,1,1])
newUg1 = tf.squeeze(newUg1) #now a viewable [5x5] matrix
newUg2 = tf.squeeze(newUg2)
newWg = tf.squeeze(newWg)
newUg1 = tf.reshape(newUg1,[1,5,5,1])
newUg2 = tf.reshape(newUg2,[1,5,5,1])
newWg = tf.reshape(newWg,[1,5,5,1])
tf.summary.image('Ug1', newUg1)
tf.summary.image('Ug2', newUg2)
tf.summary.image('Wg', newWg)
tf.summary.image('Bg', Bg)
with tf.name_scope('forget_gate'):
newUf1 = tf.slice(Uf,[0,0,0,0],[5,5,1,1])
newUf2 = tf.slice(Uf,[0,0,1,0],[5,5,1,1])
newWf = tf.slice(Wf,[0,0,0,0],[5,5,1,1])
newUf1 = tf.squeeze(newUf1) #now a viewable [5x5] matrix
newUf2 = tf.squeeze(newUf2)
newWf = tf.squeeze(newWf)
newUf1 = tf.reshape(newUf1,[1,5,5,1])
newUf2 = tf.reshape(newUf2,[1,5,5,1])
newWf = tf.reshape(newWf,[1,5,5,1])
tf.summary.image('Uf1', newUf1)
tf.summary.image('Uf2', newUf2)
tf.summary.image('Wf', newWf)
tf.summary.image('Bf', Bf)
with tf.name_scope('output_gate'):
newUo1 = tf.slice(Uo,[0,0,0,0],[5,5,1,1])
newUo2 = tf.slice(Uo,[0,0,1,0],[5,5,1,1])
newWo = tf.slice(Wo,[0,0,0,0],[5,5,1,1])
newUo1 = tf.squeeze(newUo1) #now a viewable [5x5] matrix
newUo2 = tf.squeeze(newUo2)
newWo = tf.squeeze(newWo)
newUo1 = tf.reshape(newUo1,[1,5,5,1])
newUo2 = tf.reshape(newUo2,[1,5,5,1])
newWo = tf.reshape(newWo,[1,5,5,1])
tf.summary.image('Uo1', newUo1)
tf.summary.image('Uo2', newUo2)
tf.summary.image('Wo', newWo)
tf.summary.image('Bo', Bo)
# Start training
with tf.Session(graph=graph) as sess:
tf.global_variables_initializer().run()
# Create graph summary
# Use a different log file each time you run the program.
msumm = tf.summary.merge_all()
    writer = tf.summary.FileWriter(LOGDIR + "0") # += 1 for each run till /tmp is cleared
writer.add_graph(sess.graph)
print("Shape of image: ", tf.shape(image).eval())
print("Rank of image: ", tf.rank(image).eval())
print("Size of image: ", tf.size(image).eval())
print("Shape of initial_lstm_state: ", tf.shape(initial_lstm_state).eval())
print("Rank of initial_lstm_state: ", tf.rank(initial_lstm_state).eval())
print("Size of initial_lstm_state: ", tf.size(initial_lstm_state).eval())
print("Shape of lstm_state: ", tf.shape(lstm_state).eval())
print("Rank of lstm_state: ", tf.rank(lstm_state).eval())
print("Size of lstm_state: ", tf.size(lstm_state).eval())
print("Shape of initial_lstm_output: ", tf.shape(initial_lstm_output).eval())
print("Rank of initial_lstm_output: ", tf.rank(initial_lstm_output).eval())
print("Size of initial_lstm_output: ", tf.size(initial_lstm_output).eval())
print("Shape of lstm_output: ", tf.shape(lstm_output).eval())
print("Rank of lstm_output: ", tf.rank(lstm_output).eval())
print("Size of lstm_output: ", tf.size(lstm_output).eval())
print("Shape of initial_err_input: ", tf.shape(initial_err_input).eval())
print("Rank of initial_err_input: ", tf.rank(initial_err_input).eval())
print("Size of initial_err_input: ", tf.size(initial_err_input).eval())
print("Shape of err_input: ", tf.shape(err_input).eval())
print("Rank of err_input: ", tf.rank(err_input).eval())
print("Size of err_input: ", tf.size(err_input).eval())
# Below would only used to test if the input makes sense
# output = sess.run(image)
    for step in range(NUM_TRAINING_STEPS): # 0 to NUM_TRAINING_STEPS - 1
if step % 1 == 0:
ms = sess.run(msumm)
writer.add_summary(ms, step)
_, l, predictions = sess.run([optimizer, loss, lstm_output])
print("Step: ", step)
print("Loss: ", l)
| mit |
DGrady/pandas | pandas/tests/plotting/test_deprecated.py | 6 | 1496 | # coding: utf-8
import string
import pandas as pd
import pandas.util.testing as tm
import pytest
from numpy.random import randn
import pandas.tools.plotting as plotting
from pandas.tests.plotting.common import TestPlotBase
"""
Test cases for plot functions imported from deprecated
pandas.tools.plotting
"""
tm._skip_if_no_mpl()
class TestDeprecatedNameSpace(TestPlotBase):
@pytest.mark.slow
def test_scatter_plot_legacy(self):
tm._skip_if_no_scipy()
df = pd.DataFrame(randn(100, 2))
with tm.assert_produces_warning(FutureWarning):
plotting.scatter_matrix(df)
with tm.assert_produces_warning(FutureWarning):
pd.scatter_matrix(df)
@pytest.mark.slow
def test_boxplot_deprecated(self):
df = pd.DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
df['indic'] = ['foo', 'bar'] * 3
with tm.assert_produces_warning(FutureWarning):
plotting.boxplot(df, column=['one', 'two'],
by='indic')
@pytest.mark.slow
def test_radviz_deprecated(self):
df = self.iris
with tm.assert_produces_warning(FutureWarning):
plotting.radviz(frame=df, class_column='Name')
@pytest.mark.slow
def test_plot_params(self):
with tm.assert_produces_warning(FutureWarning):
pd.plot_params['xaxis.compat'] = True
| bsd-3-clause |
ilo10/scikit-learn | examples/svm/plot_separating_hyperplane.py | 294 | 1273 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
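# Added note (illustration only): for a hard-margin linear SVM the distance
# between the two dashed margin lines drawn above is 2 / ||w||. The helper
# below computes that width from the fitted coefficients; its name is ours and
# nothing above depends on it.
def margin_width(fitted_clf):
    """Geometric width of the margin of a fitted linear SVC."""
    return 2.0 / np.linalg.norm(fitted_clf.coef_[0])

# e.g. margin_width(clf) gives the gap between the two dashed lines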
| bsd-3-clause |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/statsmodels-0.5.0-py2.7-linux-x86_64.egg/statsmodels/iolib/tests/test_foreign.py | 3 | 7414 | """
Tests for iolib/foreign.py
"""
import os
import warnings
from datetime import datetime
from numpy.testing import *
import numpy as np
from pandas import DataFrame, isnull
import pandas.util.testing as ptesting
from statsmodels.compatnp.py3k import BytesIO, asbytes
import statsmodels.api as sm
from statsmodels.iolib.foreign import (StataWriter, genfromdta,
_datetime_to_stata_elapsed, _stata_elapsed_date_to_datetime)
from statsmodels.datasets import macrodata
import pandas
pandas_old = int(pandas.__version__.split('.')[1]) < 9
# Test precisions
DECIMAL_4 = 4
DECIMAL_3 = 3
curdir = os.path.dirname(os.path.abspath(__file__))
def test_genfromdta():
#Test genfromdta vs. results/macrodta.npy created with genfromtxt.
#NOTE: Stata handles data very oddly. Round tripping from csv to dta
# to ndarray 2710.349 (csv) -> 2510.2491 (stata) -> 2710.34912109375
# (dta/ndarray)
#res2 = np.load(curdir+'/results/macrodata.npy')
#res2 = res2.view((float,len(res2[0])))
from results.macrodata import macrodata_result as res2
res1 = genfromdta(curdir+'/../../datasets/macrodata/macrodata.dta')
#res1 = res1.view((float,len(res1[0])))
assert_array_equal(res1 == res2, True)
def test_genfromdta_pandas():
from pandas.util.testing import assert_frame_equal
dta = macrodata.load_pandas().data
curdir = os.path.dirname(os.path.abspath(__file__))
res1 = sm.iolib.genfromdta(curdir+'/../../datasets/macrodata/macrodata.dta',
pandas=True)
res1 = res1.astype(float)
assert_frame_equal(res1, dta)
def test_stata_writer_structured():
buf = BytesIO()
dta = macrodata.load().data
dtype = dta.dtype
dta = dta.astype(np.dtype([('year', int),
('quarter', int)] + dtype.descr[2:]))
writer = StataWriter(buf, dta)
writer.write_file()
buf.seek(0)
dta2 = genfromdta(buf)
assert_array_equal(dta, dta2)
def test_stata_writer_array():
buf = BytesIO()
dta = macrodata.load().data
dta = DataFrame.from_records(dta)
dta.columns = ["v%d" % i for i in range(1,15)]
writer = StataWriter(buf, dta.values)
writer.write_file()
buf.seek(0)
dta2 = genfromdta(buf)
dta = dta.to_records(index=False)
assert_array_equal(dta, dta2)
def test_missing_roundtrip():
buf = BytesIO()
dta = np.array([(np.nan, np.inf, "")],
dtype=[("double_miss", float), ("float_miss", np.float32),
("string_miss", "a1")])
writer = StataWriter(buf, dta)
writer.write_file()
buf.seek(0)
dta = genfromdta(buf, missing_flt=np.nan)
assert_(isnull(dta[0][0]))
assert_(isnull(dta[0][1]))
assert_(dta[0][2] == asbytes(""))
dta = genfromdta(os.path.join(curdir, "results/data_missing.dta"),
missing_flt=-999)
assert_(np.all([dta[0][i] == -999 for i in range(5)]))
def test_stata_writer_pandas():
buf = BytesIO()
dta = macrodata.load().data
dtype = dta.dtype
#as of 0.9.0 pandas only supports i8 and f8
dta = dta.astype(np.dtype([('year', 'i8'),
('quarter', 'i8')] + dtype.descr[2:]))
dta4 = dta.astype(np.dtype([('year', 'i4'),
('quarter', 'i4')] + dtype.descr[2:]))
dta = DataFrame.from_records(dta)
dta4 = DataFrame.from_records(dta4)
# dta is int64 'i8' given to Stata writer
writer = StataWriter(buf, dta)
writer.write_file()
buf.seek(0)
dta2 = genfromdta(buf)
dta5 = DataFrame.from_records(dta2)
# dta2 is int32 'i4' returned from Stata reader
if dta5.dtypes[1] is np.dtype('int64'):
ptesting.assert_frame_equal(dta.reset_index(), dta5)
else:
# don't check index because it has different size, int32 versus int64
ptesting.assert_frame_equal(dta4, dta5[dta5.columns[1:]])
def test_stata_writer_unicode():
# make sure to test with characters outside the latin-1 encoding
pass
@dec.skipif(pandas_old)
def test_genfromdta_datetime():
results = [(datetime(2006, 11, 19, 23, 13, 20), 1479596223000,
datetime(2010, 1, 20), datetime(2010, 1, 8), datetime(2010, 1, 1),
datetime(1974, 7, 1), datetime(2010, 1, 1), datetime(2010, 1, 1)),
(datetime(1959, 12, 31, 20, 3, 20), -1479590, datetime(1953, 10, 2),
datetime(1948, 6, 10), datetime(1955, 1, 1), datetime(1955, 7, 1),
datetime(1955, 1, 1), datetime(2, 1, 1))]
with warnings.catch_warnings(record=True) as w:
dta = genfromdta(os.path.join(curdir, "results/time_series_examples.dta"))
assert_(len(w) == 1) # should get a warning for that format.
assert_array_equal(dta[0].tolist(), results[0])
assert_array_equal(dta[1].tolist(), results[1])
with warnings.catch_warnings(record=True):
dta = genfromdta(os.path.join(curdir, "results/time_series_examples.dta"),
pandas=True)
assert_array_equal(dta.irow(0).tolist(), results[0])
assert_array_equal(dta.irow(1).tolist(), results[1])
def test_date_converters():
ms = [-1479597200000, -1e6, -1e5, -100, 1e5, 1e6, 1479597200000]
days = [-1e5, -1200, -800, -365, -50, 0, 50, 365, 800, 1200, 1e5]
weeks = [-1e4, -1e2, -53, -52, -51, 0, 51, 52, 53, 1e2, 1e4]
months = [-1e4, -1e3, -100, -13, -12, -11, 0, 11, 12, 13, 100, 1e3, 1e4]
quarter = [-100, -50, -5, -4, -3, 0, 3, 4, 5, 50, 100]
half = [-50, 40, 30, 10, 3, 2, 1, 0, 1, 2, 3, 10, 30, 40, 50]
year = [1, 50, 500, 1000, 1500, 1975, 2075]
for i in ms:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "tc"), "tc"), i)
for i in days:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "td"), "td"), i)
for i in weeks:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "tw"), "tw"), i)
for i in months:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "tm"), "tm"), i)
for i in quarter:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "tq"), "tq"), i)
for i in half:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "th"), "th"), i)
for i in year:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "ty"), "ty"), i)
@dec.skipif(pandas_old)
def test_datetime_roundtrip():
dta = np.array([(1, datetime(2010, 1, 1), 2),
(2, datetime(2010, 2, 1), 3),
(4, datetime(2010, 3, 1), 5)],
dtype=[('var1', float), ('var2', object), ('var3', float)])
buf = BytesIO()
writer = StataWriter(buf, dta, {"var2" : "tm"})
writer.write_file()
buf.seek(0)
dta2 = genfromdta(buf)
assert_equal(dta, dta2)
dta = DataFrame.from_records(dta)
buf = BytesIO()
writer = StataWriter(buf, dta, {"var2" : "tm"})
writer.write_file()
buf.seek(0)
dta2 = genfromdta(buf, pandas=True)
ptesting.assert_frame_equal(dta, dta2.drop('index', axis=1))
if __name__ == "__main__":
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb'],
exit=False)
| apache-2.0 |
nsoojin/coursera-ml-py | machine-learning-ex1/ex1/ex1_multi.py | 1 | 3745 | import matplotlib.pyplot as plt
import numpy as np
from featureNormalize import *
from gradientDescent import *
from normalEqn import *
plt.ion()
# ===================== Part 1: Feature Normalization =====================
print('Loading Data...')
data = np.loadtxt('ex1data2.txt', delimiter=',', dtype=np.int64)
X = data[:, 0:2]
y = data[:, 2]
m = y.size
# Print out some data points
print('First 10 examples from the dataset: ')
for i in range(0, 10):
print('x = {}, y = {}'.format(X[i], y[i]))
input('Program paused. Press ENTER to continue')
# Scale features and set them to zero mean
print('Normalizing Features ...')
X, mu, sigma = feature_normalize(X)
X = np.c_[np.ones(m), X] # Add a column of ones to X
# ===================== Part 2: Gradient Descent =====================
# ===================== Your Code Here =====================
# Instructions : We have provided you with the following starter
# code that runs gradient descent with a particular
# learning rate (alpha).
#
# Your task is to first make sure that your functions -
# computeCost and gradientDescent already work with
# this starter code and support multiple variables.
#
# After that, try running gradient descent with
# different values of alpha and see which one gives
# you the best result.
#
# Finally, you should complete the code at the end
# to predict the price of a 1650 sq-ft, 3 br house.
#
# Hint: At prediction, make sure you do the same feature normalization.
#
print('Running gradient descent ...')
# Choose some alpha value
alpha = 0.03
num_iters = 400
# Init theta and Run Gradient Descent
theta = np.zeros(3)
theta, J_history = gradient_descent_multi(X, y, theta, alpha, num_iters)
# Plot the convergence graph
plt.figure()
plt.plot(np.arange(J_history.size), J_history)
plt.xlabel('Number of iterations')
plt.ylabel('Cost J')
# Display gradient descent's result
print('Theta computed from gradient descent : \n{}'.format(theta))
# Estimate the price of a 1650 sq-ft, 3 br house
# ===================== Your Code Here =====================
# Recall that the first column of X is all-ones. Thus, it does
# not need to be normalized.
price = 0 # You should change this
# ==========================================================
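# Hedged sketch (not the graded solution, and deliberately not assigned to
# `price` so the exercise placeholder above stays untouched): one way to form
# the prediction is to normalize the raw features with the mu/sigma returned by
# feature_normalize, prepend the intercept term, and take the dot product with
# theta. The function name and keyword defaults below are ours.
def predict_price_normalized(theta, mu, sigma, sqft=1650.0, bedrooms=3.0):
    x = (np.array([sqft, bedrooms]) - mu) / sigma
    return np.dot(np.r_[1.0, x], theta)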
print('Predicted price of a 1650 sq-ft, 3 br house (using gradient descent) : {:0.3f}'.format(price))
input('Program paused. Press ENTER to continue')
# ===================== Part 3: Normal Equations =====================
print('Solving with normal equations ...')
# ===================== Your Code Here =====================
# Instructions : The following code computes the closed form
# solution for linear regression using the normal
# equations. You should complete the code in
# normalEqn.py
#
# After doing so, you should complete this code
# to predict the price of a 1650 sq-ft, 3 br house.
#
# Load data
data = np.loadtxt('ex1data2.txt', delimiter=',', dtype=np.int64)
X = data[:, 0:2]
y = data[:, 2]
m = y.size
# Add intercept term to X
X = np.c_[np.ones(m), X]
theta = normal_eqn(X, y)
# Display normal equation's result
print('Theta computed from the normal equations : \n{}'.format(theta))
# Estimate the price of a 1650 sq-ft, 3 br house
# ===================== Your Code Here =====================
price = 0 # You should change this
# ==========================================================
print('Predicted price of a 1650 sq-ft, 3 br house (using normal equations) : {:0.3f}'.format(price))
input('ex1_multi Finished. Press ENTER to exit')
| mit |
CloudVLab/professional-services | tools/ml-auto-eda/tests/quantitative_analyzer_test.py | 2 | 1694 | # Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for quantitative analyzer"""
from __future__ import absolute_import
from __future__ import print_function
from unittest import TestCase
import pandas as pd
from ml_eda import constants
from ml_eda.analysis import quantitative_analyzer
class TestCorrelator(TestCase):
"""Test cases for quantitative analysis"""
_analyzer = quantitative_analyzer.QuantitativeAnalyzer()
def test_anova_one_way(self):
"""Test case for ANOVA"""
data = [
['1', 10, 11.203, 3.980025, 3, 36],
['2', 10, 8.938, 8.8804, 3, 36],
['3', 10, 10.683, 1.214404, 3, 36],
['4', 10, 8.838, 3.530641, 3, 36]
]
anova_df = pd.DataFrame(data, columns=[
constants.ANOVA_CATEGORICAL,
constants.ANOVA_COUNT_PER_CLASS,
constants.ANOVA_MEAN_PER_CLASS,
constants.ANOVA_VARIANCE_PER_CLASS,
constants.ANOVA_DF_GROUP,
constants.ANOVA_DF_ERROR])
f_stat = self._analyzer.anova_one_way(anova_df)
assert abs(f_stat - 3.30444) < 0.00001
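# Added worked example (illustration only, not collected as a test): the one-way
# ANOVA F statistic asserted above can be reproduced by hand from the per-class
# counts, means and variances in the fixture. All names below are ours; only
# the arithmetic mirrors the data used in test_anova_one_way.
def _anova_f_from_summary(counts, means, variances):
    """F = (between-group mean square) / (within-group mean square)."""
    k = len(counts)
    n_total = sum(counts)
    grand_mean = sum(c * m for c, m in zip(counts, means)) / float(n_total)
    ss_between = sum(c * (m - grand_mean) ** 2 for c, m in zip(counts, means))
    ss_within = sum((c - 1) * v for c, v in zip(counts, variances))
    ms_between = ss_between / (k - 1)
    ms_within = ss_within / (n_total - k)
    return ms_between / ms_within

# e.g. _anova_f_from_summary([10] * 4, [11.203, 8.938, 10.683, 8.838],
#                            [3.980025, 8.8804, 1.214404, 3.530641]) -> ~3.30444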
| apache-2.0 |
Tahsin-Mayeesha/udacity-mlnd-deeplearning-capstone | scripts/data_augmentation.py | 1 | 4604 |
# coding: utf-8
# In[1]:
import os
import pandas as pd
import matplotlib.pyplot as plt
# In[2]:
from keras.applications.vgg16 import VGG16
from keras.models import Model
from keras.callbacks import ModelCheckpoint,EarlyStopping
from keras.preprocessing.image import ImageDataGenerator
# In[3]:
from keras.utils import np_utils
from keras.models import Sequential
from keras.callbacks import EarlyStopping, History, ModelCheckpoint
from keras.layers.core import Flatten, Dense, Dropout, Reshape, Lambda
from keras.layers.normalization import BatchNormalization
# In[16]:
from sklearn.preprocessing import LabelEncoder
from keras.utils.np_utils import to_categorical
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split
# In[8]:
import numpy as np
# In[9]:
train_features = np.load('train_preprocesed.npy')
valid_features = np.load('valid_preprocessed.npy')
# In[10]:
train_dir = "new_train/"
valid_dir = "new_valid/"
# In[11]:
classes = os.listdir(train_dir)
# In[12]:
# Get the labels
train_labels = []
for c in classes:
l = [c]*len(os.listdir(train_dir+c+'/'))
train_labels.extend(l)
# In[25]:
len(train_labels)
# In[17]:
valid_labels = []
for c in classes:
l = [c]*len(os.listdir(valid_dir+c+'/'))
valid_labels.extend(l)
# In[18]:
onehot_train = to_categorical(LabelEncoder().fit_transform(train_labels))
# In[19]:
onehot_valid = to_categorical(LabelEncoder().fit_transform(valid_labels))
# In[20]:
vgg16_base = VGG16(include_top=False, weights='imagenet',
input_tensor=None, input_shape=(150, 150,3))
# Note that the preprocessing of InceptionV3 is:
# (x / 255 - 0.5) x 2
print('Adding new layers...')
output = vgg16_base.get_layer(index = -1).output
output = Flatten()(output)
# let's add a fully-connected layer
output = Dense(4096,activation = "relu")(output)
output = BatchNormalization()(output)
output = Dropout(0.5)(output)
output = Dense(512,activation = "relu")(output)
output = BatchNormalization()(output)
output = Dropout(0.5)(output)
# and a softmax output layer for the 8 fish classes
output = Dense(8, activation='softmax')(output)
vgg16_model = Model(vgg16_base.input, output)
#InceptionV3_model.summary()
# In[ ]:
for layer in vgg16_model.layers[:19]:
layer.trainable = False
# In[21]:
vgg16_model.compile(optimizer="adam",loss="categorical_crossentropy",metrics =["accuracy"])
# In[35]:
train_datagen = ImageDataGenerator(
shear_range=0.1,
zoom_range=0.1,
rotation_range=10.,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=True)
val_datagen = ImageDataGenerator()
# In[38]:
callbacks = EarlyStopping(monitor='val_loss', patience=1, verbose=1, mode='auto')
# autosave best Model
best_model_file = "./data_augmented_weights.h5"
best_model = ModelCheckpoint(best_model_file, monitor='val_acc', verbose = 1, save_best_only = True)
# In[39]:
history = vgg16_model.fit_generator(train_datagen.flow(train_features, onehot_train, batch_size=10), nb_epoch=5,
samples_per_epoch = 3019,
validation_data=val_datagen.flow(valid_features,onehot_valid,batch_size=10,shuffle=False),
nb_val_samples=758,callbacks = [callbacks,best_model])
# In[34]:
#model.load_weights("batch_normalized_weights.h5")
# In[ ]:
# summarize history for accuracy
plt.figure(figsize=(15, 5))
plt.subplot(1, 2, 1)
plt.plot(history.history['acc']); plt.plot(history.history['val_acc']);
plt.title('model accuracy'); plt.ylabel('accuracy');
plt.xlabel('epoch'); plt.legend(['train', 'valid'], loc='upper left');
# summarize history for loss
plt.subplot(1, 2, 2)
plt.plot(history.history['loss']); plt.plot(history.history['val_loss']);
plt.title('model loss'); plt.ylabel('loss');
plt.xlabel('epoch'); plt.legend(['train', 'valid'], loc='upper left');
plt.show()
# In[17]:
test_features = np.load("test_features.npy")
# In[18]:
test_preds = vgg16_model.predict(test_features, verbose=1)  # the fitted model above is named vgg16_model
# In[19]:
test_preds[0:5]
# In[21]:
submission1 = pd.DataFrame(test_preds, columns= os.listdir(train_dir))
test_files = os.listdir("test_stg1/test_stg1/")
submission1.insert(0, 'image', test_files)
submission1.head()
# In[27]:
clipped_preds = np.clip(test_preds,(1-0.82)/7,0.82)
submission2 = pd.DataFrame(clipped_preds, columns= os.listdir(train_dir))
submission2.insert(0, 'image', test_files)
submission2.head()
# In[28]:
submission2.to_csv("batch_normalized.csv",index = False)
# In[ ]:
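# Added note (illustration only): the clipping above bounds every predicted
# probability away from 0 and 1 so that multi-class log loss cannot blow up on a
# confident wrong prediction; the floor (1 - max_p) / (n_classes - 1) is what a
# capped top class leaves for each of the remaining classes. The helper name and
# its defaults are ours, not part of the original notebook.
def clip_probs(preds, max_p=0.82, n_classes=8):
    return np.clip(preds, (1.0 - max_p) / (n_classes - 1), max_p)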
| mit |
mattgiguere/scikit-learn | sklearn/decomposition/tests/test_nmf.py | 14 | 6123 | import numpy as np
from scipy import linalg
from sklearn.decomposition import nmf
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
random_state = np.random.mtrand.RandomState(0)
@raises(ValueError)
def test_initialize_nn_input():
# Test NNDSVD behaviour on negative input
nmf._initialize_nmf(-np.ones((2, 2)), 2)
def test_initialize_nn_output():
# Test that NNDSVD does not return negative values
data = np.abs(random_state.randn(10, 10))
    for var in (None, 'a', 'ar'):
        W, H = nmf._initialize_nmf(data, 10, variant=var, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10)
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'a' and 'ar' differ from basic NNDSVD only where
# the basic version has zeros.
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, variant=None)
Wa, Ha = nmf._initialize_nmf(data, 10, variant='a')
War, Har = nmf._initialize_nmf(data, 10, variant='ar', random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@raises(ValueError)
def test_projgrad_nmf_fit_nn_input():
# Test model fit behaviour on negative input
A = -np.ones((2, 2))
m = nmf.ProjectedGradientNMF(n_components=2, init=None, random_state=0)
m.fit(A)
def test_projgrad_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = nmf.ProjectedGradientNMF(n_components=2, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_projgrad_nmf_fit_close():
# Test that the fit is not too far away
pnmf = nmf.ProjectedGradientNMF(5, init='nndsvda', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
# Test that NLS solver doesn't return negative values
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
# Test that the NLS results should be close
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
def test_projgrad_nmf_transform():
# Test that NMF.transform returns close values
# (transform uses scipy.optimize.nnls for now)
A = np.abs(random_state.randn(6, 5))
m = nmf.ProjectedGradientNMF(n_components=5, init='nndsvd', random_state=0)
transf = m.fit_transform(A)
assert_true(np.allclose(transf, m.transform(A), atol=1e-2, rtol=0))
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
A = np.abs(random_state.randn(30, 10))
nmf.ProjectedGradientNMF(n_components=15, sparseness='data',
random_state=0).fit(A)
def test_projgrad_nmf_sparseness():
# Test sparseness
# Test that sparsity constraints actually increase sparseness in the
# part where they are applied.
A = np.abs(random_state.randn(10, 10))
m = nmf.ProjectedGradientNMF(n_components=5, random_state=0).fit(A)
data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0).fit(A).data_sparseness_
comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
def test_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
T1 = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999).fit_transform(A)
A_sparse = csc_matrix(A)
pg_nmf = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999)
T2 = pg_nmf.fit_transform(A_sparse)
assert_array_almost_equal(pg_nmf.reconstruction_err_,
linalg.norm(A - np.dot(T2, pg_nmf.components_),
'fro'))
assert_array_almost_equal(T1, T2)
# same with sparseness
T2 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A_sparse)
T1 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A)
def test_sparse_transform():
# Test that transform works on sparse data. Issue #2124
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(5, 4))
A[A > 1.0] = 0
A = csc_matrix(A)
model = nmf.NMF()
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
# This solver seems pretty inconsistent
assert_array_almost_equal(A_fit_tr, A_tr, decimal=2)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
bblay/iris | lib/iris/unit.py | 2 | 60011 | # (C) British Crown Copyright 2010 - 2013, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Units of measure.
Provision of a wrapper class to support Unidata/UCAR UDUNITS-2, and the
netcdftime calendar functionality.
See also: `UDUNITS-2
<http://www.unidata.ucar.edu/software/udunits/udunits-2/udunits2.html>`_.
"""
from __future__ import division
import copy
import ctypes
import ctypes.util
import warnings
import netcdftime
import numpy as np
import iris.config
import iris.util
__all__ = ['Unit', 'date2num', 'decode_time', 'encode_clock', 'encode_date',
'encode_time', 'num2date']
########################################################################
#
# module level constants
#
########################################################################
#
# default constants
#
IRIS_EPOCH = '1970-01-01 00:00:00'
_STRING_BUFFER_DEPTH = 128
_UNKNOWN_UNIT_STRING = 'unknown'
_UNKNOWN_UNIT_SYMBOL = '?'
_UNKNOWN_UNIT = [_UNKNOWN_UNIT_STRING, _UNKNOWN_UNIT_SYMBOL, '???', '']
_NO_UNIT_STRING = 'no_unit'
_NO_UNIT_SYMBOL = '-'
_NO_UNIT = [_NO_UNIT_STRING, _NO_UNIT_SYMBOL, 'no unit', 'no-unit', 'nounit']
_UNIT_DIMENSIONLESS = '1'
_OP_SINCE = ' since '
_CATEGORY_UNKNOWN, _CATEGORY_NO_UNIT, _CATEGORY_UDUNIT = range(3)
#
# libudunits2 constants
#
# ut_status enumerations
_UT_STATUS = ['UT_SUCCESS', 'UT_BAD_ARG', 'UT_EXISTS', 'UT_NO_UNIT',
'UT_OS', 'UT_NOT_SAME_NAME', 'UT_MEANINGLESS', 'UT_NO_SECOND',
'UT_VISIT_ERROR', 'UT_CANT_FORMAT', 'UT_SYNTAX', 'UT_UNKNOWN',
'UT_OPEN_ARG', 'UT_OPEN_ENV', 'UT_OPEN_DEFAULT', 'UT_PARSE']
# explicit function names
_UT_HANDLER = 'ut_set_error_message_handler'
_UT_IGNORE = 'ut_ignore'
# ut_encoding enumerations
UT_ASCII = 0
UT_ISO_8859_1 = 1
UT_LATIN1 = 1
UT_UTF8 = 2
UT_NAMES = 4
UT_DEFINITION = 8
UT_FORMATS = [UT_ASCII, UT_ISO_8859_1, UT_LATIN1, UT_UTF8, UT_NAMES,
UT_DEFINITION]
#
# netcdftime constants
#
CALENDAR_STANDARD = 'standard'
CALENDAR_GREGORIAN = 'gregorian'
CALENDAR_PROLEPTIC_GREGORIAN = 'proleptic_gregorian'
CALENDAR_NO_LEAP = 'noleap'
CALENDAR_JULIAN = 'julian'
CALENDAR_ALL_LEAP = 'all_leap'
CALENDAR_365_DAY = '365_day'
CALENDAR_366_DAY = '366_day'
CALENDAR_360_DAY = '360_day'
CALENDARS = [CALENDAR_STANDARD, CALENDAR_GREGORIAN,
CALENDAR_PROLEPTIC_GREGORIAN, CALENDAR_NO_LEAP, CALENDAR_JULIAN,
CALENDAR_ALL_LEAP, CALENDAR_365_DAY, CALENDAR_366_DAY,
CALENDAR_360_DAY]
#
# ctypes types
#
FLOAT32 = ctypes.c_float
FLOAT64 = ctypes.c_double
########################################################################
#
# module level variables
#
########################################################################
# cache for ctypes foreign shared library handles
_lib_c = None
_lib_ud = None
_ud_system = None
# cache for libc shared library functions
_strerror = None
# class cache for libudunits2 shared library functions
_cv_convert_float = None
_cv_convert_floats = None
_cv_convert_double = None
_cv_convert_doubles = None
_cv_free = None
_ut_are_convertible = None
_ut_clone = None
_ut_compare = None
_ut_decode_time = None
_ut_divide = None
_ut_encode_clock = None
_ut_encode_date = None
_ut_encode_time = None
_ut_format = None
_ut_free = None
_ut_get_converter = None
_ut_get_status = None
_ut_get_unit_by_name = None
_ut_ignore = None
_ut_invert = None
_ut_is_dimensionless = None
_ut_log = None
_ut_multiply = None
_ut_offset = None
_ut_offset_by_time = None
_ut_parse = None
_ut_raise = None
_ut_read_xml = None
_ut_root = None
_ut_scale = None
_ut_set_error_message_handler = None
########################################################################
#
# module level statements
#
########################################################################
#
# load the libc shared library
#
if _lib_c is None:
_lib_c = ctypes.CDLL(ctypes.util.find_library('libc'))
#
# cache common shared library functions
#
_strerror = _lib_c.strerror
_strerror.restype = ctypes.c_char_p
#
# load the libudunits2 shared library
#
if _lib_ud is None:
_lib_ud = iris.config.get_option(
'System', 'udunits2_path',
default=ctypes.util.find_library('udunits2'))
_lib_ud = ctypes.CDLL(_lib_ud, use_errno=True)
#
# cache common shared library functions
#
_cv_convert_float = _lib_ud.cv_convert_float
_cv_convert_float.argtypes = [ctypes.c_void_p, ctypes.c_float]
_cv_convert_float.restype = ctypes.c_float
_cv_convert_floats = _lib_ud.cv_convert_floats
_cv_convert_floats.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_ulong, ctypes.c_void_p]
_cv_convert_floats.restype = ctypes.c_void_p
_cv_convert_double = _lib_ud.cv_convert_double
_cv_convert_double.argtypes = [ctypes.c_void_p, ctypes.c_double]
_cv_convert_double.restype = ctypes.c_double
_cv_convert_doubles = _lib_ud.cv_convert_doubles
_cv_convert_doubles.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_ulong, ctypes.c_void_p]
_cv_convert_doubles.restype = ctypes.c_void_p
_cv_free = _lib_ud.cv_free
_cv_free.argtypes = [ctypes.c_void_p]
_ut_are_convertible = _lib_ud.ut_are_convertible
_ut_are_convertible.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_ut_clone = _lib_ud.ut_clone
_ut_clone.argtypes = [ctypes.c_void_p]
_ut_clone.restype = ctypes.c_void_p
_ut_compare = _lib_ud.ut_compare
_ut_compare.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_ut_compare.restype = ctypes.c_int
_ut_decode_time = _lib_ud.ut_decode_time
_ut_decode_time.restype = None
_ut_divide = _lib_ud.ut_divide
_ut_divide.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_ut_divide.restype = ctypes.c_void_p
_ut_encode_clock = _lib_ud.ut_encode_clock
_ut_encode_clock.restype = ctypes.c_double
_ut_encode_date = _lib_ud.ut_encode_date
_ut_encode_date.restype = ctypes.c_double
_ut_encode_time = _lib_ud.ut_encode_time
_ut_encode_time.restype = ctypes.c_double
_ut_format = _lib_ud.ut_format
_ut_format.argtypes = [ctypes.c_void_p, ctypes.c_char_p,
ctypes.c_ulong, ctypes.c_uint]
_ut_free = _lib_ud.ut_free
_ut_free.argtypes = [ctypes.c_void_p]
_ut_free.restype = None
_ut_get_converter = _lib_ud.ut_get_converter
_ut_get_converter.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_ut_get_converter.restype = ctypes.c_void_p
_ut_get_status = _lib_ud.ut_get_status
_ut_get_unit_by_name = _lib_ud.ut_get_unit_by_name
_ut_get_unit_by_name.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
_ut_get_unit_by_name.restype = ctypes.c_void_p
_ut_invert = _lib_ud.ut_invert
_ut_invert.argtypes = [ctypes.c_void_p]
_ut_invert.restype = ctypes.c_void_p
_ut_is_dimensionless = _lib_ud.ut_is_dimensionless
_ut_is_dimensionless.argtypes = [ctypes.c_void_p]
_ut_log = _lib_ud.ut_log
_ut_log.argtypes = [ctypes.c_double, ctypes.c_void_p]
_ut_log.restype = ctypes.c_void_p
_ut_multiply = _lib_ud.ut_multiply
_ut_multiply.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_ut_multiply.restype = ctypes.c_void_p
_ut_offset = _lib_ud.ut_offset
_ut_offset.argtypes = [ctypes.c_void_p, ctypes.c_double]
_ut_offset.restype = ctypes.c_void_p
_ut_offset_by_time = _lib_ud.ut_offset_by_time
_ut_offset_by_time.argtypes = [ctypes.c_void_p, ctypes.c_double]
_ut_offset_by_time.restype = ctypes.c_void_p
_ut_parse = _lib_ud.ut_parse
_ut_parse.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_int]
_ut_parse.restype = ctypes.c_void_p
_ut_raise = _lib_ud.ut_raise
_ut_raise.argtypes = [ctypes.c_void_p, ctypes.c_int]
_ut_raise.restype = ctypes.c_void_p
_ut_read_xml = _lib_ud.ut_read_xml
_ut_read_xml.argtypes = [ctypes.c_char_p]
_ut_read_xml.restype = ctypes.c_void_p
_ut_root = _lib_ud.ut_root
_ut_root.argtypes = [ctypes.c_void_p, ctypes.c_int]
_ut_root.restype = ctypes.c_void_p
_ut_scale = _lib_ud.ut_scale
_ut_scale.argtypes = [ctypes.c_double, ctypes.c_void_p]
_ut_scale.restype = ctypes.c_void_p
# convenience dictionary for the Unit convert method
_cv_convert_scalar = {FLOAT32: _cv_convert_float,
FLOAT64: _cv_convert_double}
_cv_convert_array = {FLOAT32: _cv_convert_floats,
FLOAT64: _cv_convert_doubles}
_numpy2ctypes = {np.float32: FLOAT32, np.float64: FLOAT64}
_ctypes2numpy = {v: k for k, v in _numpy2ctypes.iteritems()}
#
# load the UDUNITS-2 xml-formatted unit-database
#
if not _ud_system:
_func_type = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_char_p,
use_errno=True)
_set_handler_type = ctypes.CFUNCTYPE(_func_type, _func_type)
_ut_set_error_message_handler = _set_handler_type((_UT_HANDLER, _lib_ud))
_ut_ignore = _func_type((_UT_IGNORE, _lib_ud))
# ignore standard UDUNITS-2 start-up preamble redirected to stderr stream
_default_handler = _ut_set_error_message_handler(_ut_ignore)
# load the unit-database
_ud_system = _ut_read_xml(None)
# reinstate old error handler
_ut_set_error_message_handler(_default_handler)
del _func_type
if not _ud_system:
_status_msg = 'UNKNOWN'
_error_msg = ''
_status = _ut_get_status()
try:
_status_msg = _UT_STATUS[_status]
except IndexError:
pass
_errno = ctypes.get_errno()
if _errno != 0:
_error_msg = ': "%s"' % _strerror(_errno)
ctypes.set_errno(0)
raise OSError('[%s] Failed to open UDUNITS-2 XML unit database %s' % (
_status_msg, _error_msg))
########################################################################
#
# module level function definitions
#
########################################################################
def encode_time(year, month, day, hour, minute, second):
"""
Return date/clock time encoded as a double precision value.
Encoding performed using UDUNITS-2 hybrid Gregorian/Julian calendar.
Dates on or after 1582-10-15 are assumed to be Gregorian dates;
dates before that are assumed to be Julian dates. In particular, the
year 1 BCE is immediately followed by the year 1 CE.
Args:
* year (int):
Year value to be encoded.
* month (int):
Month value to be encoded.
* day (int):
Day value to be encoded.
* hour (int):
Hour value to be encoded.
* minute (int):
Minute value to be encoded.
* second (int):
Second value to be encoded.
Returns:
float.
For example:
>>> import iris.unit as unit
>>> unit.encode_time(1970, 1, 1, 0, 0, 0)
-978307200.0
"""
return _ut_encode_time(ctypes.c_int(year), ctypes.c_int(month),
ctypes.c_int(day), ctypes.c_int(hour),
ctypes.c_int(minute), ctypes.c_double(second))
def encode_date(year, month, day):
"""
Return date encoded as a double precision value.
    Encoding performed using UDUNITS-2 hybrid Gregorian/Julian calendar.
Dates on or after 1582-10-15 are assumed to be Gregorian dates;
dates before that are assumed to be Julian dates. In particular, the
year 1 BCE is immediately followed by the year 1 CE.
Args:
* year (int):
Year value to be encoded.
* month (int):
Month value to be encoded.
* day (int):
Day value to be encoded.
Returns:
float.
For example:
>>> import iris.unit as unit
>>> unit.encode_date(1970, 1, 1)
-978307200.0
"""
return _ut_encode_date(ctypes.c_int(year), ctypes.c_int(month),
ctypes.c_int(day))
def encode_clock(hour, minute, second):
"""
Return clock time encoded as a double precision value.
Args:
* hour (int):
Hour value to be encoded.
* minute (int):
Minute value to be encoded.
* second (int):
Second value to be encoded.
Returns:
float.
For example:
>>> import iris.unit as unit
>>> unit.encode_clock(0, 0, 0)
0.0
"""
return _ut_encode_clock(ctypes.c_int(hour), ctypes.c_int(minute),
ctypes.c_double(second))
def decode_time(time):
"""
Decode a double precision date/clock time value into its component
parts and return as tuple.
    Decode time into its year, month, day, hour, minute, second, and
    resolution component parts, where resolution is the uncertainty of
    the time in seconds.
Args:
* time (float): Date/clock time encoded as a double precision value.
Returns:
tuple of (year, month, day, hour, minute, second, resolution).
For example:
>>> import iris.unit as unit
>>> unit.decode_time(unit.encode_time(1970, 1, 1, 0, 0, 0))
(1970, 1, 1, 0, 0, 0.0, 1.086139178596568e-07)
"""
year = ctypes.c_int()
month = ctypes.c_int()
day = ctypes.c_int()
hour = ctypes.c_int()
minute = ctypes.c_int()
second = ctypes.c_double()
resolution = ctypes.c_double()
_ut_decode_time(ctypes.c_double(time), ctypes.pointer(year),
ctypes.pointer(month), ctypes.pointer(day),
ctypes.pointer(hour), ctypes.pointer(minute),
ctypes.pointer(second), ctypes.pointer(resolution))
return (year.value, month.value, day.value, hour.value, minute.value,
second.value, resolution.value)
def julian_day2date(julian_day, calendar):
"""
Return a netcdftime datetime-like object representing the Julian day.
If calendar is 'standard' or 'gregorian', Julian day follows
Julian calendar on and before 1582-10-5, Gregorian calendar after
1582-10-15.
If calendar is 'proleptic_gregorian', Julian Day follows Gregorian
calendar.
If calendar is 'julian', Julian Day follows Julian calendar.
The datetime object is a 'real' datetime object if the date falls in
the Gregorian calendar (i.e. calendar is 'proleptic_gregorian', or
calendar is 'standard'/'gregorian' and the date is after 1582-10-15).
Otherwise, it's a 'phony' datetime object which is actually an instance
of netcdftime.datetime.
Algorithm:
Meeus, Jean (1998) Astronomical Algorithms (2nd Edition).
Willmann-Bell, Virginia. p. 63.
Args:
* julian_day (float):
Julian day with a resolution of 1 second.
* calendar (string):
Name of the calendar, see iris.unit.CALENDARS.
Returns:
datetime or netcdftime.datetime.
For example:
>>> import iris.unit as unit
>>> import datetime
>>> unit.julian_day2date(
... unit.date2julian_day(datetime.datetime(1970, 1, 1, 0, 0, 0),
... unit.CALENDAR_STANDARD),
... unit.CALENDAR_STANDARD)
datetime.datetime(1970, 1, 1, 0, 0)
"""
return netcdftime.DateFromJulianDay(julian_day, calendar)
def date2julian_day(date, calendar):
"""
Return the Julian day (resolution of 1 second) from a netcdftime
datetime-like object.
If calendar is 'standard' or 'gregorian', Julian day follows Julian
calendar on and before 1582-10-5, Gregorian calendar after 1582-10-15.
If calendar is 'proleptic_gregorian', Julian day follows Gregorian
calendar.
If calendar is 'julian', Julian day follows Julian calendar.
Algorithm:
Meeus, Jean (1998) Astronomical Algorithms (2nd Edition).
Willmann-Bell, Virginia. p. 63.
Args:
* date (netcdftime.date):
Date and time representation.
* calendar (string):
Name of the calendar, see iris.unit.CALENDARS.
Returns:
float.
For example:
>>> import iris.unit as unit
>>> import datetime
>>> unit.date2julian_day(datetime.datetime(1970, 1, 1, 0, 0, 0),
... unit.CALENDAR_STANDARD)
2440587.5
"""
return netcdftime.JulianDayFromDate(date, calendar)
def date2num(date, unit, calendar):
"""
Return numeric time value (resolution of 1 second) encoding of
datetime object.
The units of the numeric time values are described by the unit and
calendar arguments. The datetime objects must be in UTC with no
time-zone offset. If there is a time-zone offset in unit, it will be
applied to the returned numeric values.
Like the :func:`matplotlib.dates.date2num` function, except that it allows
for different units and calendars. Behaves the same as if
unit = 'days since 0001-01-01 00:00:00' and
calendar = 'proleptic_gregorian'.
Args:
* date (datetime):
A datetime object or a sequence of datetime objects.
The datetime objects should not include a time-zone offset.
* unit (string):
A string of the form '<time-unit> since <time-origin>' describing
the time units. The <time-unit> can be days, hours, minutes or seconds.
The <time-origin> is a date/time reference point. A valid choice
would be unit='hours since 1800-01-01 00:00:00 -6:00'.
* calendar (string):
Name of the calendar, see iris.unit.CALENDARS.
Returns:
float, or numpy.ndarray of float.
For example:
>>> import iris.unit as unit
>>> import datetime
>>> dt1 = datetime.datetime(1970, 1, 1, 6, 0, 0)
>>> dt2 = datetime.datetime(1970, 1, 1, 7, 0, 0)
>>> unit.date2num(dt1, 'hours since 1970-01-01 00:00:00',
... unit.CALENDAR_STANDARD)
6.0
>>> unit.date2num([dt1, dt2], 'hours since 1970-01-01 00:00:00',
... unit.CALENDAR_STANDARD)
array([ 6., 7.])
"""
#
# ensure to strip out any 'UTC' postfix which is generated by
# UDUNITS-2 formatted output and causes the netcdftime parser
# to choke
#
unit_string = unit.rstrip(" UTC")
if unit_string.endswith(" since epoch"):
unit_string = unit_string.replace("epoch", IRIS_EPOCH)
return netcdftime.date2num(date, unit_string, calendar)
def num2date(time_value, unit, calendar):
"""
Return datetime encoding of numeric time value (resolution of 1 second).
The units of the numeric time value are described by the unit and
    calendar arguments. The returned datetime object represents UTC with
    no time-zone offset, even if the specified unit contains a time-zone
offset.
Like the :func:`matplotlib.dates.num2date` function, except that it allows
    for different units and calendars. Behaves the same as if
    unit = 'days since 0001-01-01 00:00:00' and
    calendar = 'proleptic_gregorian'.
The datetime instances returned are 'real' python datetime
objects if the date falls in the Gregorian calendar (i.e.
calendar='proleptic_gregorian', or calendar = 'standard' or 'gregorian'
and the date is after 1582-10-15). Otherwise, they are 'phony' datetime
objects which support some but not all the methods of 'real' python
datetime objects. This is because the python datetime module cannot
use the 'proleptic_gregorian' calendar, even before the switch
    occurred from the Julian calendar in 1582. The datetime instances
do not contain a time-zone offset, even if the specified unit
contains one.
Args:
* time_value (float):
Numeric time value/s. Maximum resolution is 1 second.
    * unit (string):
A string of the form '<time-unit> since <time-origin>'
describing the time units. The <time-unit> can be days, hours,
minutes or seconds. The <time-origin> is the date/time reference
point. A valid choice would be
unit='hours since 1800-01-01 00:00:00 -6:00'.
* calendar (string):
Name of the calendar, see iris.unit.CALENDARS.
Returns:
datetime, or numpy.ndarray of datetime object.
For example:
>>> import iris.unit as unit
>>> import datetime
>>> unit.num2date(6, 'hours since 1970-01-01 00:00:00',
... unit.CALENDAR_STANDARD)
datetime.datetime(1970, 1, 1, 6, 0)
>>> unit.num2date([6, 7], 'hours since 1970-01-01 00:00:00',
... unit.CALENDAR_STANDARD)
array([1970-01-01 06:00:00, 1970-01-01 07:00:00], dtype=object)
"""
#
# ensure to strip out any 'UTC' postfix which is generated by
# UDUNITS-2 formatted output and causes the netcdftime parser
# to choke
#
unit_string = unit.rstrip(" UTC")
if unit_string.endswith(" since epoch"):
unit_string = unit_string.replace("epoch", IRIS_EPOCH)
return netcdftime.num2date(time_value, unit_string, calendar)
def _handler(func):
"""Set the error message handler."""
_ut_set_error_message_handler(func)
########################################################################
#
# unit wrapper class for unidata/ucar UDUNITS-2
#
########################################################################
def _Unit(category, ut_unit, calendar=None, origin=None):
unit = iris.util._OrderedHashable.__new__(Unit)
unit._init(category, ut_unit, calendar, origin)
return unit
_CACHE = {}
def as_unit(unit):
"""
Returns a Unit corresponding to the given unit.
.. note::
If the given unit is already a Unit it will be returned unchanged.
"""
if isinstance(unit, Unit):
result = unit
else:
result = None
use_cache = isinstance(unit, basestring) or unit is None
if use_cache:
result = _CACHE.get(unit)
if result is None:
result = Unit(unit)
if use_cache:
_CACHE[unit] = result
return result
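# Illustrative usage (sketch):
#     as_unit('meters') is as_unit('meters')   # True - string lookups are cached
#     as_unit(Unit('meters'))                  # an existing Unit is returned unchanged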
def is_time(unit):
"""
Determine whether the unit is a related SI Unit of time.
Args:
* unit (string/Unit): Unit to be compared.
Returns:
Boolean.
For example:
>>> import iris.unit as unit
>>> unit.is_time('hours')
True
>>> unit.is_time('meters')
False
"""
return as_unit(unit).is_time()
def is_vertical(unit):
"""
Determine whether the unit is a related SI Unit of pressure or distance.
Args:
* unit (string/Unit): Unit to be compared.
Returns:
Boolean.
For example:
>>> import iris.unit as unit
>>> unit.is_vertical('millibar')
True
>>> unit.is_vertical('km')
True
"""
return as_unit(unit).is_vertical()
class Unit(iris.util._OrderedHashable):
"""
A class to represent S.I. units and support common operations to
manipulate such units in a consistent manner as per UDUNITS-2.
These operations include scaling the unit, offsetting the unit by a
constant or time, inverting the unit, raising the unit by a power,
taking a root of the unit, taking a log of the unit, multiplying the
unit by a constant or another unit, dividing the unit by a constant
or another unit, comparing units, copying units and converting unit
data to single precision or double precision floating point numbers.
    This class also supports time and calendar definition and manipulation.
"""
# Declare the attribute names relevant to the _OrderedHashable behaviour.
_names = ('category', 'ut_unit', 'calendar', 'origin')
category = None
'Is this an unknown unit, a no-unit, or a UDUNITS-2 unit.'
ut_unit = None
'Reference to the ctypes quantity defining the UDUNITS-2 unit.'
calendar = None
'Represents the unit calendar name, see iris.unit.CALENDARS'
origin = None
'The original string used to create this unit.'
__slots__ = ()
def __init__(self, unit, calendar=None):
"""
Create a wrapper instance for UDUNITS-2.
An optional calendar may be provided for a unit which defines a
time reference of the form '<time-unit> since <time-origin>'
i.e. unit='days since 1970-01-01 00:00:00'. For a unit that is a
time reference, the default calendar is 'standard'.
Accepted calendars are as follows,
* 'standard' or 'gregorian' - Mixed Gregorian/Julian calendar as
defined by udunits.
* 'proleptic_gregorian' - A Gregorian calendar extended to dates
before 1582-10-15. A year is a leap year if either,
1. It is divisible by 4 but not by 100, or
2. It is divisible by 400.
* 'noleap' or '365_day' - A Gregorian calendar without leap
years i.e. all years are 365 days long.
* 'all_leap' or '366_day' - A Gregorian calendar with every year
being a leap year i.e. all years are 366 days long.
* '360_day' - All years are 360 days divided into 30 day months.
* 'julian' - Proleptic Julian calendar, extended to dates after
1582-10-5. A year is a leap year if it is divisible by 4.
Args:
* unit:
Specify the unit as defined by UDUNITS-2.
* calendar (string):
Describes the calendar used in time calculations. The
default is 'standard' or 'gregorian' for a time reference
unit.
Returns:
Unit object.
Units should be set to "no_unit" for values which are strings.
Units can also be set to "unknown" (or None).
For example:
>>> from iris.unit import Unit
>>> volts = Unit('volts')
>>> no_unit = Unit('no_unit')
>>> unknown = Unit('unknown')
>>> unknown = Unit(None)
"""
ut_unit = None
calendar_ = None
if unit is None:
unit = ''
else:
unit = str(unit).strip()
if unit.lower().endswith(' utc'):
unit = unit[:unit.lower().rfind(' utc')]
if unit.endswith(" since epoch"):
unit = unit.replace("epoch", IRIS_EPOCH)
if unit.lower() in _UNKNOWN_UNIT:
# TODO - removing the option of an unknown unit. Currently
# the auto generated MOSIG rules are missing units on a
# number of phenomena which would lead to errors.
# Will be addressed by work on metadata translation.
category = _CATEGORY_UNKNOWN
unit = _UNKNOWN_UNIT_STRING
elif unit.lower() in _NO_UNIT:
category = _CATEGORY_NO_UNIT
unit = _NO_UNIT_STRING
else:
category = _CATEGORY_UDUNIT
ut_unit = _ut_parse(_ud_system, unit, UT_ASCII)
# _ut_parse returns 0 on failure
if ut_unit is None:
self._raise_error('Failed to parse unit "%s"' % unit)
if _OP_SINCE in unit.lower():
if calendar is None:
calendar_ = CALENDAR_GREGORIAN
elif calendar in CALENDARS:
calendar_ = calendar
else:
raise ValueError('{!r} is an unsupported calendar.'.format(
calendar))
self._init(category, ut_unit, calendar_, unit)
def _raise_error(self, msg):
"""
Retrieve the UDUNITS-2 ut_status, the implementation-defined string
corresponding to UDUNITS-2 errno and raise generic exception.
"""
status_msg = 'UNKNOWN'
error_msg = ''
if _lib_ud:
status = _ut_get_status()
try:
status_msg = _UT_STATUS[status]
except IndexError:
pass
errno = ctypes.get_errno()
if errno != 0:
error_msg = ': "%s"' % _strerror(errno)
ctypes.set_errno(0)
raise ValueError('[%s] %s %s' % (status_msg, msg, error_msg))
# NOTE:
# "__getstate__" and "__setstate__" functions are defined here to
# provide a custom interface for Pickle
# : Pickle "normal" behaviour is just to save/reinstate the object
# dictionary
# : that won't work here, because the "ut_unit" attribute is an
# object handle
# - the corresponding udunits object only exists in the original
# invocation
def __getstate__(self):
# state capture method for Pickle.dump()
# - return the instance data needed to reconstruct a Unit value
return {'unit_text': self.origin, 'calendar': self.calendar}
def __setstate__(self, state):
# object reconstruction method for Pickle.load()
# intercept the Pickle.load() operation and call own __init__ again
# - this is to ensure a valid ut_unit attribute (as these
# handles aren't persistent)
self.__init__(state['unit_text'], calendar=state['calendar'])
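    # Illustrative round-trip (sketch): the ut_unit handle itself is never
    # pickled; __setstate__ rebuilds it by re-parsing the stored unit string.
    #     import pickle
    #     u = Unit('hours since 1970-01-01 00:00:00', calendar=CALENDAR_STANDARD)
    #     assert pickle.loads(pickle.dumps(u)) == u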
def __del__(self):
# NB. If Python is terminating then the module global "_ut_free"
# may have already been deleted ... so we check before using it.
if _ut_free:
_ut_free(self.ut_unit)
def __copy__(self):
return self
def __deepcopy__(self, memo):
return self
def is_time(self):
"""
Determine whether this unit is a related SI Unit of time.
Returns:
Boolean.
For example:
>>> import iris.unit as unit
>>> u = unit.Unit('hours')
>>> u.is_time()
True
>>> v = unit.Unit('meter')
>>> v.is_time()
False
"""
if self.is_unknown() or self.is_no_unit():
result = False
else:
day = _ut_get_unit_by_name(_ud_system, 'day')
result = _ut_are_convertible(self.ut_unit, day) != 0
return result
def is_vertical(self):
"""
Determine whether the unit is a related SI Unit of pressure or
distance.
Returns:
Boolean.
For example:
>>> import iris.unit as unit
>>> u = unit.Unit('millibar')
>>> u.is_vertical()
True
>>> v = unit.Unit('km')
>>> v.is_vertical()
True
"""
if self.is_unknown() or self.is_no_unit():
result = False
else:
bar = _ut_get_unit_by_name(_ud_system, 'bar')
result = _ut_are_convertible(self.ut_unit, bar) != 0
if not result:
meter = _ut_get_unit_by_name(_ud_system, 'meter')
result = _ut_are_convertible(self.ut_unit, meter) != 0
return result
def is_udunits(self):
"""Return whether the unit is a vaild unit of UDUNITS."""
return self.ut_unit is not None
def is_time_reference(self):
"""
Return whether the unit is a time reference unit of the form
'<time-unit> since <time-origin>'
i.e. unit='days since 1970-01-01 00:00:00'
Returns:
Boolean.
For example:
>>> import iris.unit as unit
>>> u = unit.Unit('days since epoch')
>>> u.is_time_reference()
True
"""
return self.calendar is not None
def title(self, value):
"""
Return the unit value as a title string.
Args:
* value (float): Unit value to be incorporated into title string.
Returns:
string.
For example:
>>> import iris.unit as unit
>>> u = unit.Unit('hours since epoch',
... calendar=unit.CALENDAR_STANDARD)
>>> u.title(10)
'1970-01-01 10:00:00'
"""
if self.is_time_reference():
dt = self.num2date(value)
result = dt.strftime('%Y-%m-%d %H:%M:%S')
else:
result = '%s %s' % (str(value), self)
return result
@property
def modulus(self):
"""
*(read-only)* Return the modulus value of the unit.
Convenience method that returns the unit modulus value as follows,
* 'radians' - pi*2
* 'degrees' - 360.0
* Otherwise None.
Returns:
float.
For example:
>>> import iris.unit as unit
>>> u = unit.Unit('degrees')
>>> u.modulus
360.0
"""
if self == 'radians':
result = np.pi * 2
elif self == 'degrees':
result = 360.0
else:
result = None
return result
def is_convertible(self, other):
"""
Return whether two units are convertible.
Args:
* other (Unit): Unit to be compared.
Returns:
Boolean.
For example:
>>> import iris.unit as unit
>>> u = unit.Unit('meters')
>>> v = unit.Unit('kilometers')
>>> u.is_convertible(v)
True
"""
other = as_unit(other)
if self.is_unknown() or self.is_no_unit() or other.is_unknown() or \
other.is_no_unit():
result = False
else:
result = (self.calendar == other.calendar and
_ut_are_convertible(self.ut_unit, other.ut_unit) != 0)
return result
def is_dimensionless(self):
"""
Return whether the unit is dimensionless.
Returns:
Boolean.
For example:
>>> import iris.unit as unit
>>> u = unit.Unit('meters')
>>> u.is_dimensionless()
False
>>> u = unit.Unit('1')
>>> u.is_dimensionless()
True
"""
return (self.category == _CATEGORY_UDUNIT and
bool(_ut_is_dimensionless(self.ut_unit)))
def is_unknown(self):
"""
Return whether the unit is defined to be an *unknown* unit.
Returns:
Boolean.
For example:
>>> import iris.unit as unit
>>> u = unit.Unit('unknown')
>>> u.is_unknown()
True
>>> u = unit.Unit('meters')
>>> u.is_unknown()
False
"""
return self.category == _CATEGORY_UNKNOWN
def is_no_unit(self):
"""
Return whether the unit is defined to be a *no_unit* unit.
        Typically, a quantity such as a string will have no associated
unit to describe it. Such a class of quantity may be defined
using the *no_unit* unit.
Returns:
Boolean.
For example:
>>> import iris.unit as unit
>>> u = unit.Unit('no unit')
>>> u.is_no_unit()
True
>>> u = unit.Unit('meters')
>>> u.is_no_unit()
False
"""
return self.category == _CATEGORY_NO_UNIT
def format(self, option=None):
"""
Return a formatted string representation of the binary unit.
Args:
* option (iris.unit.UT_FORMATS):
Set the encoding option of the formatted string representation.
Valid encoding options may be one of the following enumerations:
* Unit.UT_ASCII
* Unit.UT_ISO_8859_1
* Unit.UT_LATIN1
* Unit.UT_UTF8
* Unit.UT_NAMES
* Unit.UT_DEFINITION
Multiple options may be combined within a list. The default
option is iris.unit.UT_ASCII.
Returns:
string.
For example:
>>> import iris.unit as unit
>>> u = unit.Unit('meters')
>>> u.format()
'm'
>>> u.format(unit.UT_NAMES)
'meter'
>>> u.format(unit.UT_DEFINITION)
'm'
"""
if self.is_unknown():
return _UNKNOWN_UNIT_STRING
elif self.is_no_unit():
return _NO_UNIT_STRING
else:
bitmask = UT_ASCII
if option is not None:
if not isinstance(option, list):
option = [option]
for i in option:
bitmask |= i
string_buffer = ctypes.create_string_buffer(_STRING_BUFFER_DEPTH)
depth = _ut_format(self.ut_unit, string_buffer,
ctypes.sizeof(string_buffer), bitmask)
if depth < 0:
self._raise_error('Failed to format %r' % self)
return string_buffer.value
@property
def name(self):
"""
*(read-only)* The full name of the unit.
Formats the binary unit into a string representation using
method :func:`iris.unit.Unit.format` with keyword argument
option=iris.unit.UT_NAMES.
Returns:
string.
For example:
>>> import iris.unit as unit
>>> u = unit.Unit('watts')
>>> u.name
'watt'
"""
return self.format(UT_NAMES)
@property
def symbol(self):
"""
*(read-only)* The symbolic representation of the unit.
Formats the binary unit into a string representation using
method :func:`iris.unit.Unit.format`.
Returns:
string.
For example:
>>> import iris.unit as unit
>>> u = unit.Unit('watts')
>>> u.symbol
'W'
"""
if self.is_unknown():
result = _UNKNOWN_UNIT_SYMBOL
elif self.is_no_unit():
result = _NO_UNIT_SYMBOL
else:
result = self.format()
return result
@property
def definition(self):
"""
*(read-only)* The symbolic decomposition of the unit.
Formats the binary unit into a string representation using
method :func:`iris.unit.Unit.format` with keyword argument
option=iris.unit.UT_DEFINITION.
Returns:
string.
For example:
>>> import iris.unit as unit
>>> u = unit.Unit('watts')
>>> u.definition
'm2.kg.s-3'
"""
if self.is_unknown():
result = _UNKNOWN_UNIT_SYMBOL
elif self.is_no_unit():
result = _NO_UNIT_SYMBOL
else:
result = self.format(UT_DEFINITION)
return result
def offset_by_time(self, origin):
"""
Returns the time unit offset with respect to the time origin.
Args:
* origin (float): Time origin as returned by the
:func:`iris.unit.encode_time` method.
Returns:
        Unit.
For example:
>>> import iris.unit as unit
>>> u = unit.Unit('hours')
>>> u.offset_by_time(unit.encode_time(1970, 1, 1, 0, 0, 0))
Unit('hour since 1970-01-01 00:00:00.0000000 UTC')
"""
if not isinstance(origin, (int, float, long)):
raise TypeError('a numeric type for the origin argument is'
' required')
ut_unit = _ut_offset_by_time(self.ut_unit, ctypes.c_double(origin))
if not ut_unit:
self._raise_error('Failed to offset %r' % self)
calendar = None
return _Unit(_CATEGORY_UDUNIT, ut_unit, calendar)
def invert(self):
"""
Invert the unit i.e. find the reciprocal of the unit, and return
the Unit result.
Returns:
Unit.
For example:
>>> import iris.unit as unit
>>> u = unit.Unit('meters')
>>> u.invert()
Unit('meter^-1')
"""
if self.is_unknown():
result = self
elif self.is_no_unit():
raise ValueError("Cannot invert a 'no-unit'.")
else:
ut_unit = _ut_invert(self.ut_unit)
if not ut_unit:
self._raise_error('Failed to invert %r' % self)
calendar = None
result = _Unit(_CATEGORY_UDUNIT, ut_unit, calendar)
return result
def root(self, root):
"""
Returns the given root of the unit.
Args:
* root (int/long): Value by which the unit root is taken.
Returns:
        Unit.
For example:
>>> import iris.unit as unit
>>> u = unit.Unit('meters^2')
>>> u.root(2)
Unit('meter')
.. note::
Taking a fractional root of a unit is not supported.
"""
try:
root = ctypes.c_int(root)
except TypeError:
raise TypeError('An int or long type for the root argument'
' is required')
if self.is_unknown():
result = self
elif self.is_no_unit():
            raise ValueError("Cannot take the root of a 'no-unit'.")
else:
# only update the unit if it is not scalar
if self == Unit('1'):
result = self
else:
ut_unit = _ut_root(self.ut_unit, root)
if not ut_unit:
self._raise_error('Failed to take the root of %r' % self)
calendar = None
result = _Unit(_CATEGORY_UDUNIT, ut_unit, calendar)
return result
def log(self, base):
"""
        Returns the logarithmic unit corresponding to the given
        logarithmic base.
        Args:
        * base (int/float/long): Value of the logarithmic base.
Returns:
        Unit.
For example:
>>> import iris.unit as unit
>>> u = unit.Unit('meters')
>>> u.log(2)
Unit('lb(re 1 meter)')
"""
try:
base = ctypes.c_double(base)
except TypeError:
raise TypeError('A numeric type for the base argument is required')
if self.is_unknown():
result = self
elif self.is_no_unit():
raise ValueError("Cannot take the logarithm of a 'no-unit'.")
else:
ut_unit = _ut_log(base, self.ut_unit)
if not ut_unit:
                msg = 'Failed to calculate logarithmic base of %r' % self
self._raise_error(msg)
calendar = None
result = _Unit(_CATEGORY_UDUNIT, ut_unit, calendar)
return result
def __str__(self):
"""
Returns a simple string representation of the unit.
Returns:
string.
For example:
>>> import iris.unit as unit
>>> u = unit.Unit('meters')
>>> str(u)
'meters'
"""
return self.origin or self.name
def __repr__(self):
"""
Returns a string representation of the unit object.
Returns:
string.
For example:
>>> import iris.unit as unit
>>> u = unit.Unit('meters')
>>> repr(u)
"Unit('meters')"
"""
if self.calendar is None:
result = "%s('%s')" % (self.__class__.__name__, self)
else:
result = "%s('%s', calendar='%s')" % (self.__class__.__name__,
self, self.calendar)
return result
def _offset_common(self, offset):
try:
offset = ctypes.c_double(offset)
except TypeError:
result = NotImplemented
else:
if self.is_unknown():
result = self
elif self.is_no_unit():
raise ValueError("Cannot offset a 'no-unit'.")
else:
ut_unit = _ut_offset(self.ut_unit, offset)
if not ut_unit:
self._raise_error('Failed to offset %r' % self)
calendar = None
result = _Unit(_CATEGORY_UDUNIT, ut_unit, calendar)
return result
def __add__(self, other):
return self._offset_common(other)
def __sub__(self, other):
try:
other = -other
except TypeError:
result = NotImplemented
else:
result = self._offset_common(-other)
return result
def _op_common(self, other, op_func):
        # Convenience method to create a new unit from an operation between
# the units 'self' and 'other'.
op_label = op_func.__name__.split('_')[1]
other = as_unit(other)
if self.is_no_unit() or other.is_no_unit():
raise ValueError("Cannot %s a 'no-unit'." % op_label)
if self.is_unknown() or other.is_unknown():
result = _Unit(_CATEGORY_UNKNOWN, None)
else:
ut_unit = op_func(self.ut_unit, other.ut_unit)
if not ut_unit:
msg = 'Failed to %s %r by %r' % (op_label, self, other)
self._raise_error(msg)
calendar = None
result = _Unit(_CATEGORY_UDUNIT, ut_unit, calendar)
return result
def __rmul__(self, other):
# NB. Because we've subclassed a tuple, we need to define this to
# prevent the default tuple-repetition behaviour.
# ie. 2 * ('a', 'b') -> ('a', 'b', 'a', 'b')
return self * other
def __mul__(self, other):
"""
Multiply the self unit by the other scale factor or unit and
return the Unit result.
Note that, multiplication involving an 'unknown' unit will always
result in an 'unknown' unit.
Args:
* other (int/float/long/string/Unit): Multiplication scale
factor or unit.
Returns:
Unit.
For example:
>>> import iris.unit as unit
>>> u = unit.Unit('meters')
>>> v = unit.Unit('hertz')
>>> u*v
Unit('meter-second^-1')
"""
return self._op_common(other, _ut_multiply)
def __div__(self, other):
"""
Divide the self unit by the other scale factor or unit and
return the Unit result.
Note that, division involving an 'unknown' unit will always
result in an 'unknown' unit.
Args:
* other (int/float/long/string/Unit): Division scale factor or unit.
Returns:
Unit.
For example:
>>> import iris.unit as unit
>>> u = unit.Unit('m.s-1')
>>> v = unit.Unit('hertz')
>>> u/v
Unit('meter')
"""
return self._op_common(other, _ut_divide)
def __truediv__(self, other):
"""
Divide the self unit by the other scale factor or unit and
return the Unit result.
Note that, division involving an 'unknown' unit will always
result in an 'unknown' unit.
Args:
* other (int/float/long/string/Unit): Division scale factor or unit.
Returns:
Unit.
For example:
>>> import iris.unit as unit
>>> u = unit.Unit('m.s-1')
>>> v = unit.Unit('hertz')
>>> u/v
Unit('meter')
"""
return self.__div__(other)
def __pow__(self, power):
"""
Raise the unit by the given power and return the Unit result.
Note that, UDUNITS-2 does not support raising a
non-dimensionless unit by a fractional power.
Approximate floating point power behaviour has been implemented
specifically for Iris.
Args:
* power (int/float/long): Value by which the unit power is raised.
Returns:
Unit.
For example:
>>> import iris.unit as unit
>>> u = unit.Unit('meters')
>>> u**2
Unit('meter^2')
"""
try:
power = float(power)
except ValueError:
raise TypeError('A numeric value is required for the power'
' argument.')
if self.is_unknown():
result = self
elif self.is_no_unit():
raise ValueError("Cannot raise the power of a 'no-unit'.")
elif self == Unit('1'):
# 1 ** N -> 1
result = self
else:
# UDUNITS-2 does not support floating point raise/root.
# But if the power is of the form 1/N, where N is an integer
# (within a certain acceptable accuracy) then we can find the Nth
# root.
if not iris.util.approx_equal(power, 0.0) and abs(power) < 1:
if not iris.util.approx_equal(1 / power, round(1 / power)):
raise ValueError('Cannot raise a unit by a decimal.')
root = int(round(1 / power))
result = self.root(root)
else:
# Failing that, check for powers which are (very nearly) simple
# integer values.
if not iris.util.approx_equal(power, round(power)):
msg = 'Cannot raise a unit by a decimal (got %s).' % power
raise ValueError(msg)
power = int(round(power))
ut_unit = _ut_raise(self.ut_unit, ctypes.c_int(power))
if not ut_unit:
self._raise_error('Failed to raise the power of %r' % self)
result = _Unit(_CATEGORY_UDUNIT, ut_unit)
return result
def _identity(self):
# Redefine the comparison/hash/ordering identity as used by
# iris.util._OrderedHashable.
return (self.name, self.calendar)
def __eq__(self, other):
"""
Compare the two units for equality and return the boolean result.
Args:
* other (string/Unit): Unit to be compared.
Returns:
Boolean.
For example:
>>> from iris.unit import Unit
>>> Unit('meters') == Unit('millimeters')
False
>>> Unit('meters') == 'm'
True
"""
other = as_unit(other)
# Compare category (i.e. unknown, no_unit, etc.).
if self.category != other.category:
return False
# Compare calendar as UDUNITS cannot handle calendars.
if self.calendar != other.calendar:
return False
# Compare UDUNITS.
res = _ut_compare(self.ut_unit, other.ut_unit)
return res == 0
def __ne__(self, other):
"""
Compare the two units for inequality and return the boolean result.
Args:
* other (string/Unit): Unit to be compared.
Returns:
Boolean.
For example:
>>> from iris.unit import Unit
>>> Unit('meters') != Unit('millimeters')
True
>>> Unit('meters') != 'm'
False
"""
return not self == other
def convert(self, value, other, ctype=FLOAT64):
"""
Converts a single value or numpy array of values from the current unit
to the other target unit.
If the units are not convertible, then no conversion will take place.
Args:
* value (int/float/long/numpy.ndarray):
Value/s to be converted.
* other (string/Unit):
Target unit to convert to.
* ctype (ctypes.c_float/ctypes.c_double):
Floating point 32-bit single-precision (iris.unit.FLOAT32) or
64-bit double-precision (iris.unit.FLOAT64) of conversion. The
default is 64-bit double-precision conversion.
Returns:
float or numpy.ndarray of appropriate float type.
For example:
>>> import iris.unit as unit
>>> import numpy as np
>>> c = unit.Unit('deg_c')
>>> f = unit.Unit('deg_f')
>>> print c.convert(0, f)
32.0
>>> c.convert(0, f, unit.FLOAT32)
32.0
>>> a64 = np.arange(10, dtype=np.float64)
>>> c.convert(a64, f)
array([ 32. , 33.8, 35.6, 37.4, 39.2, 41. , 42.8, 44.6, \
46.4, 48.2])
>>> a32 = np.arange(10, dtype=np.float32)
>>> c.convert(a32, f)
array([ 32. , 33.79999924, 35.59999847, 37.40000153,
39.20000076, 41. , 42.79999924, 44.59999847,
46.40000153, 48.20000076], dtype=float32)
.. note::
Conversion is done *in-place* for numpy arrays. Also note that,
conversion between unit calendars is not permitted.
"""
result = None
other = as_unit(other)
value_copy = copy.deepcopy(value)
if self == other:
return value
if self.is_convertible(other):
# Use utime for converting reference times that are not using a
# gregorian calendar as it handles these and udunits does not.
            if self.is_time_reference() and self.calendar != CALENDAR_GREGORIAN:
ut1 = self.utime()
ut2 = other.utime()
result = ut2.date2num(ut1.num2date(value_copy))
else:
ut_converter = _ut_get_converter(self.ut_unit, other.ut_unit)
if ut_converter:
if isinstance(value_copy, np.ndarray):
# Can only handle array of np.float32 or np.float64 so
# cast array of ints to array of floats of requested
# precision.
if issubclass(value_copy.dtype.type, np.integer):
value_copy = value_copy.astype(
_ctypes2numpy[ctype])
# strict type check of numpy array
if value_copy.dtype.type not in _numpy2ctypes.keys():
raise TypeError(
"Expect a numpy array of '%s' or '%s'" %
tuple(sorted(_numpy2ctypes.keys())))
ctype = _numpy2ctypes[value_copy.dtype.type]
pointer = value_copy.ctypes.data_as(
ctypes.POINTER(ctype))
# Utilise global convenience dictionary
# _cv_convert_array
_cv_convert_array[ctype](ut_converter, pointer,
value_copy.size, pointer)
result = value_copy
else:
if ctype not in _cv_convert_scalar.keys():
raise ValueError('Invalid target type. Can only '
'convert to float or double.')
# Utilise global convenience dictionary
# _cv_convert_scalar
result = _cv_convert_scalar[ctype](ut_converter,
ctype(value_copy))
_cv_free(ut_converter)
else:
self._raise_error('Failed to convert %r to %r' %
(self, other))
else:
raise ValueError("Unable to convert from '%s' to '%s'." %
(self, other))
return result
def utime(self):
"""
Returns a netcdftime.utime object which performs conversions of
numeric time values to/from datetime objects given the current
calendar and unit time reference.
The current unit time reference must be of the form:
'<time-unit> since <time-origin>'
i.e. 'hours since 1970-01-01 00:00:00'
Returns:
netcdftime.utime.
For example:
>>> import iris.unit as unit
>>> u = unit.Unit('hours since 1970-01-01 00:00:00',
... calendar=unit.CALENDAR_STANDARD)
>>> ut = u.utime()
>>> ut.num2date(2)
datetime.datetime(1970, 1, 1, 2, 0)
"""
#
# ensure to strip out non-parsable 'UTC' postfix which
# is generated by UDUNITS-2 formatted output
#
if self.calendar is None:
raise ValueError('Unit has undefined calendar')
return netcdftime.utime(str(self).rstrip(" UTC"), self.calendar)
def date2num(self, date):
"""
Returns the numeric time value calculated from the datetime
object using the current calendar and unit time reference.
The current unit time reference must be of the form:
'<time-unit> since <time-origin>'
i.e. 'hours since 1970-01-01 00:00:00'
Works for scalars, sequences and numpy arrays. Returns a scalar
if input is a scalar, else returns a numpy array.
Args:
* date (datetime):
A datetime object or a sequence of datetime objects.
The datetime objects should not include a time-zone offset.
Returns:
float or numpy.ndarray of float.
For example:
>>> import iris.unit as unit
>>> import datetime
>>> u = unit.Unit('hours since 1970-01-01 00:00:00',
... calendar=unit.CALENDAR_STANDARD)
>>> u.date2num(datetime.datetime(1970, 1, 1, 5))
5.00000000372529
>>> u.date2num([datetime.datetime(1970, 1, 1, 5),
... datetime.datetime(1970, 1, 1, 6)])
array([ 5., 6.])
"""
cdf_utime = self.utime()
return cdf_utime.date2num(date)
def num2date(self, time_value):
"""
Returns a datetime-like object calculated from the numeric time
value using the current calendar and the unit time reference.
The current unit time reference must be of the form:
'<time-unit> since <time-origin>'
i.e. 'hours since 1970-01-01 00:00:00'
The datetime objects returned are 'real' Python datetime objects
if the date falls in the Gregorian calendar (i.e. the calendar
is 'standard', 'gregorian', or 'proleptic_gregorian' and the
date is after 1582-10-15). Otherwise a 'phoney' datetime-like
object (netcdftime.datetime) is returned which can handle dates
that don't exist in the Proleptic Gregorian calendar.
Works for scalars, sequences and numpy arrays. Returns a scalar
if input is a scalar, else returns a numpy array.
Args:
* time_value (float): Numeric time value/s. Maximum resolution
is 1 second.
Returns:
datetime, or numpy.ndarray of datetime object.
For example:
>>> import iris.unit as unit
>>> u = unit.Unit('hours since 1970-01-01 00:00:00',
... calendar=unit.CALENDAR_STANDARD)
>>> u.num2date(6)
datetime.datetime(1970, 1, 1, 6, 0)
>>> u.num2date([6, 7])
array([1970-01-01 06:00:00, 1970-01-01 07:00:00], dtype=object)
"""
cdf_utime = self.utime()
return cdf_utime.num2date(time_value)
| gpl-3.0 |
liuchengtian/CS523 | steerstats/tools/plotting/plotBarMetricOptEntropy.py | 8 | 1518 | #!/usr/bin/env python
# a bar plot with errorbars
import numpy as np
import matplotlib.pyplot as plt
N = 2
scale = 100.0
ppr = np.array([[3.41784, 3.403440],
[1.91507, 2.271680]])
ppr = np.divide(ppr[0] - ppr[1], ppr[0]) * scale
orca = np.array([[2.117200, 2.953220],
[0.628748, 2.203690]])
orca = np.divide(orca[0] - orca[1], orca[0]) * scale
sf = np.array([[3.741280, 3.620520],
[3.098120, 2.757230]])
sf = np.divide(sf[0] - sf[1], sf[0]) * scale
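# Each two-row array above holds a pair of entropy-metric values per scenario;
# the expressions convert them to the percent reduction of row 1 relative to
# row 0, i.e. (row0 - row1) / row0 * 100.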
"""
max = np.amax(ppr)
ppr = ppr / max
orca = orca / max
sf = sf / max
"""
# menStd = (2, 3, 4, 1, 2)
ind = np.arange(N) # the x locations for the groups
width = 0.25 # the width of the bars
adjust=0.5
fig, ax = plt.subplots()
rects1 = ax.bar(ind, ppr, width, color='r')
# womenStd = (3, 5, 2, 3, 3)
rects2 = ax.bar(ind+width, orca, width, color='b')
rects3 = ax.bar(ind+(width*2.0), sf, width, color='g')
# add some text for labels, title and axes ticks
# ax.set_ylabel('Scores')
# ax.set_title('Scores by group and gender')
ax.set_xticks(ind+(width*1.5))
# ax.set_xticklabels( ('d', 'q^d', 'q^t', 'q^e', 'e', 'u') )
ax.set_xticklabels( ('', '', '', '', '', '') )
ax.legend((rects1[0], rects2[0], rects3[0]), ('PPR', 'ORCA', 'SF'))
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%d'%int(height),
ha='center', va='bottom')
# autolabel(rects1)
# autolabel(rects2)
# autolabel(rects3)
plt.show() | gpl-3.0 |
hammerlab/datacache | setup.py | 1 | 2448 | # Copyright (c) 2014-2018. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import re
from setuptools import setup
readme_filename = "README.md"
current_directory = os.path.dirname(__file__)
readme_path = os.path.join(current_directory, readme_filename)
try:
with open(readme_path, "r") as f:
readme_markdown = f.read()
except:
logging.warn("Failed to load %s", readme_filename)
readme_markdown = ""
try:
import pypandoc
readme_restructured = pypandoc.convert(readme_markdown, to="rst", format="md")
except:
readme_restructured = readme_markdown
logging.warn("Failed to convert %s to reStructuredText", readme_filename)
with open('datacache/__init__.py', 'r') as f:
version = re.search(
r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
f.read(),
re.MULTILINE).group(1)
if __name__ == "__main__":
setup(
name="datacache",
version=version,
description="Helpers for transparently downloading datasets",
author="Alex Rubinsteyn",
author_email="[email protected]",
url="https://github.com/openvax/datacache",
license="http://www.apache.org/licenses/LICENSE-2.0.html",
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Console",
"Operating System :: OS Independent",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Topic :: Scientific/Engineering :: Bio-Informatics",
],
install_requires=[
"pandas>=0.15.2",
"appdirs>=1.4.0",
"progressbar33>=2.4",
"requests>=2.5.1",
"typechecks>=0.0.2",
"mock",
],
long_description=readme_restructured,
packages=["datacache"],
)
| apache-2.0 |
samzhang111/scikit-learn | sklearn/datasets/base.py | 22 | 22973 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import csv
import sys
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os.path import splitext
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets
Dictionary-like object that exposes its keys as attributes.
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
>>> b.b
2
>>> b.a = 3
>>> b['a']
3
>>> b.c = 6
>>> b['c']
6
"""
def __init__(self, **kwargs):
super(Bunch, self).__init__(kwargs)
def __setattr__(self, key, value):
self[key] = value
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __getstate__(self):
return self.__dict__
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
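# A minimal usage sketch of the resolution order described above (folder
# names are illustrative only, not scikit-learn's actual defaults):
#
#     >>> from sklearn.datasets import get_data_home, clear_data_home
#     >>> get_data_home()                           # doctest: +SKIP
#     '/home/user/scikit_learn_data'                # or $SCIKIT_LEARN_DATA if set
#     >>> custom = get_data_home('~/my_sk_cache')   # doctest: +SKIP
#     >>> clear_data_home(custom)                   # doctest: +SKIP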
def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
Individual samples are assumed to be files stored in a two-level folder
structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
The folder names are used as supervised signal label names. The
individual file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use text files in a scikit-learn classification or clustering
algorithm, you will need to use the `sklearn.feature_extraction.text`
module to build a feature extraction transformer that suits your
problem.
If you set load_content=True, you should also specify the encoding of
the text using the 'encoding' parameter. For many modern text files,
'utf-8' will be the correct encoding. If you leave encoding equal to None,
then the content will be made of bytes instead of Unicode, and you will
not be able to use most functions in `sklearn.feature_extraction.text`.
Similar feature extractors should be built for other kind of unstructured
data input such as images, audio, video, ...
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
description: string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
load_content : boolean, optional (default=True)
Whether to load or not the content of the different files. If
true a 'data' attribute containing the text information is present
in the data structure returned. If not, a filenames attribute
gives the path to the files.
encoding : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for
images or other non-text content).
If not None, encoding to use to decode text files to Unicode if
load_content is True.
decode_error: {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
data, the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = []
for filename in filenames:
with open(filename, 'rb') as f:
data.append(f.read())
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
def load_iris():
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'iris.csv')) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float64)
target[i] = np.asarray(ir[-1], dtype=np.int)
with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
def load_breast_cancer():
"""Load and return the breast cancer wisconsin dataset (classification).
The breast cancer dataset is a classic and very easy binary classification
dataset.
================= ==============
Classes 2
Samples per class 212(M),357(B)
Samples total 569
Dimensionality 30
Features real, positive
================= ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
The copy of UCI ML Breast Cancer Wisconsin (Diagnostic) dataset is
downloaded from:
https://goo.gl/U2Uwz2
Examples
--------
Let's say you are interested in the samples 10, 50, and 85, and want to
know their class name.
>>> from sklearn.datasets import load_breast_cancer
>>> data = load_breast_cancer()
>>> data.target[[10, 50, 85]]
array([0, 1, 0])
>>> list(data.target_names)
['malignant', 'benign']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'breast_cancer.csv')) as csv_file:
data_file = csv.reader(csv_file)
first_line = next(data_file)
n_samples = int(first_line[0])
n_features = int(first_line[1])
target_names = np.array(first_line[2:4])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for count, value in enumerate(data_file):
data[count] = np.asarray(value[:-1], dtype=np.float64)
target[count] = np.asarray(value[-1], dtype=np.int)
with open(join(module_path, 'descr', 'breast_cancer.rst')) as rst_file:
fdescr = rst_file.read()
feature_names = np.array(['mean radius', 'mean texture',
'mean perimeter', 'mean area',
'mean smoothness', 'mean compactness',
'mean concavity', 'mean concave points',
'mean symmetry', 'mean fractal dimension',
'radius error', 'texture error',
'perimeter error', 'area error',
'smoothness error', 'compactness error',
'concavity error', 'concave points error',
'symmetry error', 'fractal dimension error',
'worst radius', 'worst texture',
'worst perimeter', 'worst area',
'worst smoothness', 'worst compactness',
'worst concavity', 'worst concave points',
'worst symmetry', 'worst fractal dimension'])
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=feature_names)
def load_digits(n_class=10):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print(digits.data.shape)
(1797, 64)
>>> import pylab as pl #doctest: +SKIP
>>> pl.gray() #doctest: +SKIP
>>> pl.matshow(digits.images[0]) #doctest: +SKIP
>>> pl.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
with open(join(module_path, 'descr', 'digits.rst')) as f:
descr = f.read()
target = data[:, -1]
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
return Bunch(data=flat_data,
target=target.astype(np.int),
target_names=np.arange(10),
images=images,
DESCR=descr)
def load_diabetes():
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
return Bunch(data=data, target=target)
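# A usage sketch consistent with the shapes documented above (kept as a
# comment rather than a doctest to leave the upstream docstring untouched):
#
#     >>> from sklearn.datasets import load_diabetes
#     >>> diabetes = load_diabetes()
#     >>> diabetes.data.shape, diabetes.target.shape   # doctest: +SKIP
#     ((442, 10), (442,))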
def load_linnerud():
"""Load and return the linnerud dataset (multivariate regression).
Samples total: 20
Dimensionality: 3 for both data and targets
Features: integer
Targets: integer
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data' and
'targets', the two multivariate datasets, with 'data' corresponding to
the exercise and 'targets' corresponding to the physiological
measurements, as well as 'feature_names' and 'target_names'.
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
def load_boston():
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
and 'DESCR', the full description of the dataset.
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> print(boston.data.shape)
(506, 13)
"""
module_path = dirname(__file__)
fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst')
with open(fdescr_name) as f:
descr_text = f.read()
data_file_name = join(module_path, 'data', 'boston_house_prices.csv')
with open(data_file_name) as f:
data_file = csv.reader(f)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
temp = next(data_file) # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float64)
target[i] = np.asarray(d[-1], dtype=np.float64)
return Bunch(data=data,
target=target,
# last column is target value
feature_names=feature_names[:-1],
DESCR=descr_text)
def load_sample_images():
"""Load sample images for image manipulation.
Loads both ``china`` and ``flower``.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'images', the two sample images, 'filenames', the file
names for the images, and 'DESCR'
the full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Parameters
-----------
image_name: {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img: 3D array
The image as a numpy array: height x width x color
Examples
---------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
def _pkl_filepath(*args, **kwargs):
"""Ensure different filenames for Python 2 and Python 3 pickles
An object pickled under Python 3 cannot be loaded under Python 2.
An object pickled under Python 2 can sometimes not be loaded
correctly under Python 3 because some Python 2 strings are decoded as
Python 3 strings which can be problematic for objects that use Python 2
strings as byte buffers for numerical data instead of "real" strings.
Therefore, dataset loaders in scikit-learn use different files for pickles
managed by Python 2 and Python 3 in the same SCIKIT_LEARN_DATA folder so
as to avoid conflicts.
args[-1] is expected to be the ".pkl" filename. Under Python 3, a
suffix is inserted before the extension, so that for example
_pkl_filepath('/path/to/folder', 'filename.pkl') returns:
- /path/to/folder/filename.pkl under Python 2
- /path/to/folder/filename_py3.pkl under Python 3+
"""
py3_suffix = kwargs.get("py3_suffix", "_py3")
basename, ext = splitext(args[-1])
if sys.version_info[0] >= 3:
basename += py3_suffix
new_args = args[:-1] + (basename + ext,)
return join(*new_args)
| bsd-3-clause |
C2SM/hymet_idealized | neatpostproc_user.py | 1 | 69273 | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Copyright (C) 2013-2014 Steven Boeing, ETHZ
# A PYTHON SCRIPT TO POSTPROCESS ALL COSMO OUTPUT IN A DIRECTORY AT ONCE
# SEPARATE SCRIPTS SHOULD DO THE PLOTTING OF OUTPUT
# AIMED AT A COMBINATION OF SPEED, READABILITY AND SOMEWHAT LIMITED MEMORY USAGE
# I.E. ABILITY TO POSTPROCESS LARGE DATA ON A SINGLE NODE
# detect all cosmo output files, their type, and find the corresponding time
# incrementally add the output to an output file
# includes masked/sampled statistics (like traditional LES models)
# masks can also be used to average over pre-selected subdomain (e.g. region around hill)
# but currently such domains are not implemented
# outputs to a file which is interpolated to height levels and saved with f4 precision (for file size limitation)
from numpy import *
import numpy as np
from netCDF4 import Dataset
import os,errno
from time import clock
from scipy import weave #Embed code in C
from scipy.weave import converters
import sys # system functions
import glob # a libary for regular expressions in file names
import shutil # for copying the files to project
import areaspectra as asp # A separate library for computing spectra
import tarfile # For compressing the cloud data
from optparse import OptionParser # Flags to add later: store interpolated output?
from variablelist import * # A separate file contains the actual variable list
import getpass
myusername=getpass.getuser()
nboundlines=3
interpout=True # also store interpolated output on regular grid for fast data exploration
loadmpl=False # Load matplotlib
lbud=True # Try to calculate budget variables?
lzlib=True # Compress output using zlib?
lcross=True # Produce cross-sections? But of course
lclouds=True # Produce tar-ball with 3d cloud fields for storage
lsats=True # Produce file with satellite stuff
np.seterr(invalid='ignore') # don't whine about NaNs
# cosmo constants for calculations
pref=1.0e5 # ref pressure
rd=287.05 # gas constant, dry air
rv=461.51 # gas constant, water vapor
cpd=1005.0 # heat capacity at constant pressure
rlv=2.501e6 # latent heat of condensation
riv=2.835e6 # latent heat of sublimation
grav=9.81 # gravitational acceleration
# constants used in calculation of saturation pressure (COSMO specific)
b1=610.78
b2w=17.2693882
b2i=21.8745584
b3=273.16
b4w=35.86
b4i=7.66
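# Reference sketch (not called anywhere below): the Magnus/Tetens-type
# saturation relation these constants feed, as evaluated again in
# statgroup_level.app_dervars; the numerical example in the comment is
# illustrative only.
def _saturation_sketch(t_abs,p_abs):
    # saturation vapor pressure over liquid water [Pa] and the corresponding
    # saturation specific humidity [kg/kg]; e.g. t_abs=293.15 K, p_abs=1.0e5 Pa
    # gives roughly 2.3e3 Pa and 0.015 kg/kg
    psat=b1*exp(b2w*(t_abs-b3)/(t_abs-b4w))
    qsat=(rd/rv)*psat/(p_abs-(1.0-rd/rv)*psat)
    return psat,qsat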
start=clock()
# currently not used, but useful when analyzing problems
if loadmpl:
import matplotlib
matplotlib.use('agg') # first define agg output, then continue to load rest of matplotlib and pyplot
from matplotlib import *
from matplotlib.pyplot import *
# forced makedir
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
# specify different cosmo outputfile types
def make_filelist():
global filelist
types = {'level':'lfff*.nc','cloud':'lf*.nca','crossxy':'lf*.ncb','height':'lfff*.ncc','sats':'lfff*.ncs'} # the file types
filelist={}
for i in types:
filelist[i]=glob.glob(fulldir+types[i])
filelist[i].sort()
# create a tarfile (for cloud data)
def make_tarfile(output_filename, infiles):
with tarfile.open(output_filename,'w') as tar:
for infile in infiles:
tar.add(infile)
# class for masks/conditional sampling
class mask:
def __init__(self):
pass
def setfield(self,field):
self.field=field
# mask for spectra, currently the same as regular mask class
class specmask(mask):
pass
# set values in an array of heights to at least the surface value
# useful for integration of e.g. the water vapor anomaly
# (wherever dz is needed, cosmo simply interpolates onto these levels)
# uses C-code (weave) for speed
def replace_h_hlower(h,hlower,tmax,kmax,jmax,imax):
code = """
int i, j, k, t;
for (t=0; t<tmax; t++) {
for (k=0; k<kmax; k++) {
for (j=0; j<jmax; j++) {
for (i=0; i<imax; i++) {
if(h(t,k,j,i)<hlower(j,i)) {
h(t,k,j,i)=hlower(j,i);
}
}
}
}
}
"""
weave.inline(code,['h','hlower','tmax','kmax','jmax','imax'],type_converters=converters.blitz,compiler='gcc')
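# Pure-numpy reference version of the weave kernel above (a sketch for
# readability only; the script itself keeps using the weave variant):
def replace_h_hlower_np(h,hlower):
    # clip all heights that lie below the local surface up to hlower (in place)
    h[:]=np.maximum(h,hlower[None,None,:,:])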
# weave interpolation to height levels
# takes advantage of known cosmo 2d layout
# uses C-code (weave) for speed
def int_to_height(outarr,outheights,inarr,inheights,tmax,kmax,jmax,kmaxout):
code = """
int t,k,j,ks;
int kstore[tmax][jmax];
for (t=0; t<tmax; t++) {
for (j=0; j<jmax; j++) {
kstore[t][j]=kmaxout-1;
}
}
for (t=0; t<tmax; t++) {
for (k=0; k<kmax-1; k++) {
for (j=0; j<jmax; j++) {
ks=kstore[t][j];
while(outheights(ks)>inheights(t,k+1,j) and ks>0) {
outarr(t,ks,j)=inarr(t,k,j)+(outheights(ks)-inheights(t,k,j))*(inarr(t,k+1,j)-inarr(t,k,j))/(inheights(t,k+1,j)-inheights(t,k,j));
ks=ks-1;
}
if(ks==0 and outheights(ks)>(inheights(t,k+1,j)-1e-8)) {
outarr(t,ks,j)=inarr(t,k,j)+(outheights(ks)-inheights(t,k,j))*(inarr(t,k+1,j)-inarr(t,k,j))/(inheights(t,k+1,j)-inheights(t,k,j));
}
kstore[t][j]=ks;
}
}
}
"""
weave.inline(code,['outarr','outheights','inarr','inheights','tmax','kmax','jmax','kmaxout'],type_converters=converters.blitz,compiler='gcc')
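# (note: the kernel above appears to assume COSMO ordering of the input, i.e.
#  inheights decreasing with the level index, and ascending output heights;
#  output levels above the topmost input level are linearly extrapolated
#  from the top pair of input levels)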
# prepare spectral data into 2d arrays, containing stretches
# along all points that match sampling criteria
# currently assumes xz-type geometry
# uses C-code (weave) for speed
def prepare_spectra_xz(dataout,data,mask,tmax,kmax,jmax,imax,nsegmentsmax):
code = """
int i, j, k, t, nsegments;
nsegments=0;
for (t=0; t<tmax; t++) {
for (k=0; k<kmax; k++) {
for (i=0; i<imax; i++) {
if(mask(t,k,i)) {
for (j=0; j<jmax; j++) {
dataout(nsegments,j)=data(t,k,j,i);
}
nsegments=nsegments+1;
}
}
}
}
"""
weave.inline(code,['dataout','data','mask','tmax','kmax','jmax','imax','nsegmentsmax'],type_converters=converters.blitz,compiler='gcc')
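# Pure-numpy reference version of the kernel above (a sketch only, not used
# by the script): boolean indexing visits (t,k,i) in the same C order as the
# triple loop, so the rows come out in the same segment order.
def prepare_spectra_xz_np(data,mask):
    # returns an (nsegments,jmax) array with one y-stretch per sampled point
    return data.transpose(0,1,3,2)[mask.astype(bool)]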
# mean value across 2d slab
# note: to calculate a true mean for a run with topography, including filled values, you need to correct for the topography
def mean2d(inputfield):
meanfield=mean(mean(inputfield,axis=-2, dtype=np.float64),axis=-1, dtype=np.float64)
return meanfield
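# (the correction mentioned above is applied further down, e.g. in
#  ncheighthelper.update, along the lines of
#      mean2d(field*topomask)/mean2d(topomask)
#  so that grid points below the surface do not dilute the average)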
# mean value across 1d line
def mean_1d(inputfield,geometry):
if(geometry=='xz'):
meanfield=mean(inputfield,axis=-2, dtype=np.float64)
elif(geometry=='yz'):
meanfield=mean(inputfield,axis=-1, dtype=np.float64)
return meanfield
# extraction used for cross-sections
def extract_1d(inputfield,geometry):
if(geometry=='xz'):
extrfield=inputfield[:,:,0,:]
elif(geometry=='yz'):
extrfield=inputfield[:,:,:,0]
return extrfield
# deviations with respect to a 1d line
def deviation_1d(inputfield,geometry):
if(geometry=='xz'):
meanfield=mean(inputfield,axis=2, dtype=np.float64)
delta=inputfield-meanfield[:,:,None,:]
elif(geometry=='yz'):
meanfield=mean(inputfield,axis=3, dtype=np.float64)
delta=inputfield-meanfield[:,:,:,None]
return delta
# command to get a single variable from a file
def var_from_file(dataset,key):
try:
return dataset.variables[(key)][:,:,nboundlines:-nboundlines,nboundlines:-nboundlines]
except:
return(nan)
# a class to include some methods needed for both helper objects and netcdf objects
class get_variable_class():
# gv being "get variable"
def gv(self,key):
if key in self.varkeys:
return self.data.variables[(key)][:,:,nboundlines:-nboundlines,nboundlines:-nboundlines]
else:
return(self.data.variables[('P')][:,:,nboundlines:-nboundlines,nboundlines:-nboundlines]*nan)
# gdim being "get dimension"
def gdim(self,key):
try:
return self.data.variables[(key)][:]
except:
try:
return xrange(len(self.data.dimensions[(key)]))
except:
return([nan])
# a class to store derived variables from output which are needed relatively often
class nchelper(object,get_variable_class):
def __init__(self,geom):
self.data=[]
self.geom=geom
self.tstep=0
def update(self):
self.tstep=self.tstep+1
def calcrhoqc(self):
p=self.gv('P')
t=self.gv('T')
hydrotot=self.gv('QC')+self.gv('QR')+self.gv('QS')+self.gv('QG')+self.gv('QI')
# if no ice species defined, correct precipitation
if(isnan(hydrotot[0,0,0,0])):
hydrotot=self.gv('QC')+self.gv('QR')+self.gv('QS')+self.gv('QI')
if(isnan(hydrotot[0,0,0,0])):
hydrotot=self.gv('QC')
trho=t*(1+(rv/rd-1)*self.gv('QV')-hydrotot)
self.rhoh=p/(rd*trho)
qci=self.gv('QC')+self.gv('QI')
# if ice species are not defined, correct qci
if(isnan(hydrotot[0,0,0,0])):
qci=self.gv('QC')
self.qt=self.gv('QC')+self.gv('QV')+self.gv('QI')
if(isnan(self.qt[0,0,0,0])):
self.qt=self.gv('QC')+self.gv('QV')
iexnf=(p/pref)**(-rd/cpd) #inverse exner function (not stored)
# buoyancy potenital temperature
self.thetarhoh=trho*iexnf
# cloud mask
self.cld=(qci>1.0e-6)
# derived variables specific to level output
class nclevelhelper(nchelper):
def update(self,data):
self.data=data
self.varkeys=self.data.variables.keys()
self.calcrhoqc()
h=self.gv('HHL')
w=self.gv('W')
dh=h[:,1:,:,:]-h[:,:-1,:,:]
# integrated paths of water species
self.vwp=-sum(self.rhoh*self.gv('QV')*dh,axis=1,dtype=float64)
self.cwp=-sum(self.rhoh*self.gv('QC')*dh,axis=1,dtype=float64)
self.rwp=-sum(self.rhoh*self.gv('QR')*dh,axis=1,dtype=float64)
self.gwp=-sum(self.rhoh*self.gv('QG')*dh,axis=1,dtype=float64)
self.swp=-sum(self.rhoh*self.gv('QS')*dh,axis=1,dtype=float64)
self.iwp=-sum(self.rhoh*self.gv('QI')*dh,axis=1,dtype=float64)
self.wmax=nanmax(w,axis=1)
self.wmin=nanmin(w,axis=1)
self.sshf=self.gv('HFLUX')[:,-1,:,:]
self.slhf=self.gv('EFLUX')[:,-1,:,:]
self.h=h
self.hh=0.5*(h[:,1:]+h[:,:-1])
self.wh=0.5*(w[:,1:]+w[:,:-1])
# cloud and cloud updraft fractions
self.cldqcifrac=1.0*(sum(self.cld,axis=1)>0)
self.cldqciw1frac=1.0*(sum(self.cld*(self.wh>1.0),axis=1)>0)
# wind at lowest level
self.llwind=sqrt(self.gv('U')[:,-1,:,:]**2+self.gv('V')[:,-1,:,:]**2)
super(nclevelhelper,self).update()
# derived variables specific to height output
class ncheighthelper(nchelper):
def update(self,data):
self.data=data
self.varkeys=self.data.variables.keys()
self.calcrhoqc()
if(self.tstep==0):
self.make_topo()
### calculate buoyancies, and fix output
if(self.geom=='xz'):
mthetarhoh=mean2d(self.thetarhoh*self.topomask[None,:,None,:])[:,:,None,None]/mean2d(self.topomask[None,:,None,:])[:,:,None,None]
self.buoy=grav*(self.thetarhoh*self.topomask[None,:,None,:]-mthetarhoh)/(mthetarhoh*self.topomask[None,:,None,:])
elif(self.geom=='yz'):
mthetarhoh=mean2d(self.thetarhoh*self.topomask[None,:,:,None])[:,:,None,None]/mean2d(self.topomask[None,:,:,None])[:,:,None,None]
self.buoy=grav*(self.thetarhoh*self.topomask[None,:,:,None]-mthetarhoh)/(mthetarhoh*self.topomask[None,:,:,None])
whereinf=isinf(self.buoy);
self.buoy[whereinf] = nan;
nanbuoy=self.buoy[:,:,:,:]
wherefin = isfinite(nanbuoy);
nanbuoy[wherefin==False] = 0.0;
### calculate water vapor anomalies, and fix output
qv=self.gv('QV')
if(self.geom=='xz'):
mqv=mean2d(qv*self.topomask[None,:,None,:])[:,:,None,None]/mean2d(self.topomask[None,:,None,:])[:,:,None,None]
self.qvp=(qv*self.topomask[None,:,None,:]-mqv)
elif(self.geom=='yz'):
mqv=mean2d(qv*self.topomask[None,:,:,None])[:,:,None,None]/mean2d(self.topomask[None,:,:,None])[:,:,None,None]
self.qvp=(qv*self.topomask[None,:,:,None]-mqv)
whereinf=isinf(self.qvp);
self.qvp[whereinf] = nan;
nanqvp=self.qvp[:,:,:,:]
wherefin = isfinite(nanqvp);
nanqvp[wherefin==False] = 0.0;
# integrated/min/max quantities
self.buoymax=nanmax(nanbuoy,axis=1)
self.buoymin=nanmin(nanbuoy,axis=1)
self.posbuoypath=sum(nanbuoy*(nanbuoy>0.0)*self.dh,axis=1,dtype=float64)
self.negbuoypath=sum(nanbuoy*(nanbuoy<0.0)*self.dh,axis=1,dtype=float64)
self.poswvapanompath=sum(nanqvp*(nanqvp>0.0)*self.dh,axis=1,dtype=float64)
self.negwvapanompath=sum(nanqvp*(nanqvp<0.0)*self.dh,axis=1,dtype=float64)
super(ncheighthelper,self).update()
def make_topo(self):
# make masks for the topography
alt=self.gdim('altitude')
lalt=len(alt)
tim=self.gdim('time')
lent=len(tim)
if(self.geom=='xz'):
self.topomask=zeros((lalt,shape(hlower)[1]),int)
self.hhsmall=zeros((lent,lalt,shape(hlower)[1]))
for k in xrange(lalt):
for j in xrange(shape(hlower)[1]):
self.topomask[k,j]=1.0*(alt[k]>hlower[0,j])
self.hhsmall[:,k,j]=alt[k]*(alt[k]>hlower[0,j])
elif(self.geom=='yz'):
self.topomask=zeros((lalt,shape(hlower)[0]),int)
self.hhsmall=zeros((lent,lalt,shape(hlower)[0]))
for k in xrange(lalt):
for j in xrange(shape(hlower)[0]):
self.topomask[k,j]=1.0*(alt[k]>hlower[j,0])
self.hhsmall[:,k,j]=alt[k]*(alt[k]>hlower[j,0])
self.topomasknan=zeros(shape(self.topomask))
self.topomasknan[:]=self.topomask[:]
self.topomasknan[self.topomask==0]=np.nan
# calculate height difference between cells, make sure height
# below the topography is not counted
if(self.geom=='xz'):
h1=(self.hhsmall[:,0,:])[:,None,None,:]
h2=0.5*(self.hhsmall[:,1:,:]+self.hhsmall[:,:-1,:])[:,:,None,:]
h3=(self.hhsmall[:,-1,:])[:,None,None,:]
h=np.concatenate((h1,h2,h3),axis=1)
replace_h_hlower(h,hlower,shape(h)[0],shape(h)[1],shape(h)[2],shape(h)[3])
elif(self.geom=='yz'):
h1=(self.hhsmall[:,0,:])[:,None,None,:]
h2=0.5*(self.hhsmall[:,1:,:]+self.hhsmall[:,:-1,:])[:,:,:,None]
h3=(self.hhsmall[:,-1,:])[:,None,:,None]
h=np.concatenate((h1,h2,h3),axis=1)
replace_h_hlower(h,hlower,shape(h)[0],shape(h)[1],shape(h)[2],shape(h)[3])
self.dh=h[:,1:,:,:]-h[:,:-1,:,:]
class ncobject(object,get_variable_class):
# class for writing to netcdf
def __init__(self,outfile):
self.data=[] # links to input data
self.outvars={}
self.ncoutname=outdir+outfile
try:
os.remove(self.ncoutname)
except:
pass
self.outfile=Dataset(self.ncoutname,'w',format='NETCDF4',zlib=lzlib)
self.outfile.createDimension('time',0)
self.outfile.createVariable('time', 'f8', ('time',),zlib=lzlib)
self.dirvars=[]
self.dervars=[]
self.dervarsunits=[]
self.outfile.close()
self.tstep=0
# get a 2d variable from file
def gv_2d(self,key):
try:
return self.data.variables[(key)][:,nboundlines:-nboundlines,nboundlines:-nboundlines]
except:
return([nan])
# get the units
def gu(self,key):
try:
return self.data.variables[(key)].units
except:
return([''])
# functions below are defined in derived classes when needed
def makedims(self):
pass
def app_dirvars(self):
pass
def app_dervars(self):
pass
def calc_masks(self):
pass
# LAYOUT OF POSTPROCESSING TIME STEP
def appvars(self):
self.calc_masks()
self.app_dirvars()
self.app_dervars()
def opener(self,data):
self.data=data
self.varkeys=self.data.variables.keys()
self.outfile=Dataset(self.ncoutname,'a',format='NETCDF4',zlib=lzlib)
if(self.tstep==0):
self.set_dims()
self.bt=len(self.outfile.variables['time'])
self.et=len(self.outfile.variables['time'])+len(self.data.variables['time'][:])
self.outfile.variables['time'][self.bt:self.et]=self.data.variables['time'][:]
print(self.outfile.variables['time'][self.bt:self.et])
def closer(self):
self.tstep=self.tstep+1
self.outfile.close()
def app_tstep(self,data):
self.opener(data)
self.appvars()
self.closer()
# functions to initialize dimensions in output
def set_dims(self):
pass
def init_dim(self,dimname,dimvalues):
self.outfile.createDimension(dimname,len(dimvalues))
var=self.outfile.createVariable(dimname, 'f8', (dimname,),zlib=lzlib)
var[:]=dimvalues
def init_dimz(self):
self.init_dim('z',self.gdim('altitude'))
def init_dimx(self):
self.xs=[round(x) for x in 1000*(self.gdim('rlon')[nboundlines:-nboundlines]-nanmin(self.gdim('rlon')[nboundlines:-nboundlines]))/0.00899289]
self.init_dim('x',self.xs)
def init_dimy(self):
self.ys=[round(x) for x in 1000*(self.gdim('rlat')[nboundlines:-nboundlines]-nanmin(self.gdim('rlat')[nboundlines:-nboundlines]))/0.00899289]
self.init_dim('y',self.ys)
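# (rlon/rlat are rotated-pole coordinates in degrees; the factor 0.00899289
#  used above is degrees per km, i.e. roughly 111.2 km per degree, so the
#  x/y axes come out in metres)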
def init_dimchannels(self):
self.init_dim('nsynmsg',self.gdim('nsynmsg'))
# functions to initialize variables in output (dir=DIRECT, der=DERIVED)
def try_init_dirvar(self,var,dims,mask=''):
if(self.tstep==0):
so=self.outfile.createVariable(var+mask, 'f4', dims,zlib=lzlib)
so.missing_value = nan
so.long_name=self.data.variables[var].long_name
so.units=self.gu(var)
def try_init_der_var(self,var,dims,mask='',vtype=''):
if(self.tstep==0):
so=self.outfile.createVariable(var+mask, 'f4', dims,zlib=lzlib)
so.missing_value = nan
so.long_name=var
if(vtype=='maskfrac'):
so.units='-'
elif(vtype=='mf'):
so.units='kg m s-1'
elif(vtype=='spec'):
try:
so.units=self.dervarsunits[var]+' m-1'
except:
so.units=self.gu(var)+' m-1'
else:
so.units=self.dervarsunits[var]
# make all sampled variables
def make_sampvars(self,insampvars):
self.sampvars={}
for i in self.dirvars+self.dervars:
if i in insampvars:
self.sampvars[i]=True
else:
self.sampvars[i]=False
def make_specvars(self,inspecvars):
self.specvars={}
for i in self.dirvars+self.dervars:
if i in inspecvars:
self.specvars[i]=True
else:
self.specvars[i]=False
# most low level way to write a field
def put_var(self,var,field):
self.outfile.variables[var][self.bt:self.et]=field
# general class xy cross section variables
class statgroup_spectra(ncobject):
def __init__(self,geom,outfile):
super(statgroup_spectra,self).__init__(outfile)
self.geom=geom
self.initiated=False
# general class xy cross section variables
class statgroupint(ncobject):
def __init__(self,outfile):
super(statgroupint,self).__init__(outfile)
def app_dirvars(self):
for var in self.dirvars:
if var in self.varkeys:
self.try_init_dirvar(var,('time','y','x',))
self.put_var(var,self.gv_2d(var))
def set_dims(self):
self.init_dimx()
self.init_dimy()
class statgroupintlevel(statgroupint):
def robust_minimum_finder(self,field):
isminimum=nan*zeros(shape(field))
self.robust_minimum_weaver(isminimum,field,shape(field)[0],shape(field)[1],shape(field)[2],shape(field)[3])
return isminimum
def robust_minimum_weaver(self,isminimum,f,tmax,kmax,jmax,imax):
code = """
int i, j, k, t;
for (t=0; t<tmax; t++) {
for (k=3; k<kmax-3; k++) {
for (j=0; j<jmax; j++) {
for (i=0; i<imax; i++) {
if(f(t,k,j,i)>f(t,k-1,j,i) and f(t,k,j,i)>f(t,k-2,j,i) and f(t,k,j,i)>f(t,k-3,j,i) and f(t,k,j,i)>f(t,k+1,j,i) and f(t,k,j,i)>f(t,k+2,j,i) and f(t,k,j,i)>f(t,k+3,j,i)) {
isminimum(t,k,j,i)=1;
}
}
}
}
}
"""
weave.inline(code,['isminimum','f','tmax','kmax','jmax','imax'],type_converters=converters.blitz,compiler='gcc')
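# (note: despite its name, the kernel above flags local *maxima* of the input
#  profile over a +/-3 level window; app_dervars below then takes the lowest
#  flagged height of the vertical theta_rho gradient as its boundary layer
#  top estimate)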
def __init__(self):
super(statgroupintlevel,self).__init__('intlv.'+marker+'.nc')
self.dirvars=dirintvarslevel
self.dervars=derintvarslevel
self.dervarsunits=derintvarsunitslevel
self.helper=levelhelper
def app_dervars(self):
for var in self.dervars:
self.try_init_der_var(var,('time','y','x',))
# calculate and put the actual derived variables
self.put_var('VWP',self.helper.vwp)
self.put_var('CWP',self.helper.cwp)
self.put_var('RWP',self.helper.rwp)
self.put_var('GWP',self.helper.gwp)
self.put_var('SWP',self.helper.swp)
self.put_var('IWP',self.helper.iwp)
self.put_var('WMAX',self.helper.wmax)
self.put_var('WMIN',self.helper.wmin)
self.put_var('SSHF',self.helper.sshf)
self.put_var('SLHF',self.helper.slhf)
self.put_var('CLDQCIFRAC',self.helper.cldqcifrac)
self.put_var('CLDQCIW1FRAC',self.helper.cldqciw1frac)
self.put_var('CLDTOP',nanmax(self.helper.cld*self.helper.hh,axis=1))
self.put_var('CLDUPDTOP',nanmax(self.helper.cld*(self.helper.wh>1.0)*self.helper.hh,axis=1))
# find boundary layer using maximum in second derivative
# algorithm is relatively robust for daytime?
thetarhograd=(self.helper.thetarhoh[:,2:,:,:]-self.helper.thetarhoh[:,:-2,:,:])/(self.helper.hh[:,2:,:,:]-self.helper.hh[:,:-2,:,:])
isminimum=self.robust_minimum_finder(thetarhograd)
hpbl=nanmin(isminimum*self.helper.hh[:,1:-1,:,:],axis=1)
self.put_var('HPBLTHETA',hpbl)
self.put_var('DHPBLTHETA',hpbl-hlower)
thetarhograd2=(thetarhograd[:,1:,:,:]-thetarhograd[:,:-1,:,:])/(self.helper.hh[:,2:-1,:,:]-self.helper.hh[:,1:-2,:,:])
isminimum=self.robust_minimum_finder(thetarhograd2)
hpbl=nanmin(isminimum*self.helper.h[:,2:-2,:,:],axis=1)
self.put_var('HPBLTHETA2',hpbl)
self.put_var('DHPBLTHETA2',hpbl-hlower)
# make some space for new variables
del thetarhograd,thetarhograd2,isminimum
qtthadv=(self.gv('AQVT_ADV')+self.gv('AQCT_ADV'))
dh=self.helper.h[:,1:,:,:]-self.helper.h[:,:-1,:,:]
# qt convergence by advection only
self.put_var('QTTCONV',sum(qtthadv*(qtthadv>0.)*dh*self.helper.rhoh,axis=1,dtype=float64))
self.put_var('QTTDIV',sum(qtthadv*(qtthadv<0.)*dh*self.helper.rhoh,axis=1,dtype=float64))
self.put_var('QTTNET',sum(qtthadv*dh*self.helper.rhoh,axis=1,dtype=float64))
# d(rho*w)/dz, measure for mass convergence (positive and negative integrals must equal...)
drhow=-(concatenate(((self.helper.wh*self.helper.rhoh)[:,:,:,:],0.*(self.helper.wh[:,0,:,:]*self.helper.rhoh[:,0,:,:])[:,None,:,:]),axis=1)-concatenate((0.*(self.helper.wh[:,0,:,:]*self.helper.rhoh[:,0,:,:])[:,None,:,:],(self.helper.wh*self.helper.rhoh)[:,:,:,:]),axis=1))
self.put_var('DRHOWDZPOSINT',sum(drhow*(drhow>0.),axis=1,dtype=float64))
self.put_var('DRHOWDZNEGINT',sum(drhow*(drhow<0.),axis=1,dtype=float64))
self.put_var('LLWIND',self.helper.llwind)
class statgroupintheight(statgroupint):
def __init__(self):
super(statgroupintheight,self).__init__('intz.'+marker+'.nc')
self.dirvars=dirintvarsheight
self.dervars=derintvarsheight
self.dervarsunits=derintvarsunitsheight
self.helper=heighthelper
def app_dervars(self):
for var in self.dervars:
self.try_init_der_var(var,('time','y','x',))
self.put_var('BUOYMAX',self.helper.buoymax)
self.put_var('BUOYMIN',self.helper.buoymin)
self.put_var('POSBUOYPATH',self.helper.posbuoypath)
self.put_var('NEGBUOYPATH',self.helper.negbuoypath)
self.put_var('BUOYPATH',self.helper.posbuoypath+self.helper.negbuoypath)
self.put_var('POSWVAPANOMPATH',self.helper.poswvapanompath)
self.put_var('NEGWVAPANOMPATH',self.helper.negwvapanompath)
self.put_var('WVAPANOMPATH',self.helper.poswvapanompath+self.helper.negwvapanompath)
# general class for domain averaged variables
class statgroup_dom(ncobject):
def __init__(self,outfile):
super(statgroup_dom,self).__init__(outfile)
def app_dirvars(self):
for var in self.dirvars:
if var in self.varkeys:
self.try_init_dirvar(var,('time',))
self.put_var(var,mean2d(self.gv_2d(var)))
class statgroup_domlevel(statgroup_dom):
def __init__(self):
super(statgroup_domlevel,self).__init__('domlv.'+marker+'.nc')
self.dirvars=dirdomvarslevel
self.dervars=derdomvarslevel
self.dervarsunits=derdomvarsunitslevel
self.helper=levelhelper
def app_dervars(self):
for var in self.dervars:
self.try_init_der_var(var,('time',))
# calculate and put the actual derived variables
self.put_var('VWP',mean2d(self.helper.vwp))
self.put_var('CWP',mean2d(self.helper.cwp))
self.put_var('RWP',mean2d(self.helper.rwp))
self.put_var('GWP',mean2d(self.helper.gwp))
self.put_var('SWP',mean2d(self.helper.swp))
self.put_var('IWP',mean2d(self.helper.iwp))
self.put_var('WMAX',nanmax(nanmax(self.helper.wmax,axis=2),axis=1))
self.put_var('WMIN',nanmin(nanmin(self.helper.wmin,axis=2),axis=1))
self.put_var('SSHF',mean2d(self.helper.sshf))
self.put_var('SLHF',mean2d(self.helper.slhf))
self.put_var('CLDQCIFRAC',mean2d(self.helper.cldqcifrac))
self.put_var('CLDQCIW1FRAC',mean2d(self.helper.cldqciw1frac))
class statgroup_domheight(statgroup_dom):
def __init__(self):
super(statgroup_domheight,self).__init__('domz.'+marker+'.nc')
self.dirvars=dirdomvarsheight
self.dervars=derdomvarsheight
self.dervarsunits=derdomvarsunitsheight
self.helper=heighthelper
def app_dervars(self):
for var in self.dervars:
self.try_init_der_var(var,('time',))
self.put_var('BUOYMAX',nanmax(nanmax(self.helper.buoymax,axis=2),axis=1))
self.put_var('BUOYMIN',nanmin(nanmin(self.helper.buoymin,axis=2),axis=1))
# general class for domain averaged variables along one dimension
# (hoevmoller diagrams)
class statgroup_hov(ncobject):
def __init__(self,geom,outfile):
super(statgroup_hov,self).__init__(outfile)
self.geom=geom
def app_dirvars(self):
for var in self.dirvars:
if var in self.varkeys:
if(self.geom=='xz'):
self.try_init_dirvar(var,('time','x',))
elif(self.geom=='yz'):
self.try_init_dirvar(var,('time','y',))
self.put_var(var,mean_1d(self.gv_2d(var),self.geom))
def set_dims(self):
if(self.geom=='xz'):
self.init_dimx()
elif(self.geom=='yz'):
self.init_dimy()
class statgroup_hovlevel(statgroup_hov):
def __init__(self,geom):
super(statgroup_hovlevel,self).__init__(geom,'hovlv.'+marker+'.nc')
self.dirvars=dirhovvarslevel
self.dervars=derhovvarslevel
self.dervarsunits=derhovvarsunitslevel
self.helper=levelhelper
def app_dervars(self):
# calculate and put the derived variables
if(self.geom=='xz'):
for var in self.dervars:
self.try_init_der_var(var,('time','x'))
self.put_var('WMAX',nanmax(self.helper.wmax,axis=1))
self.put_var('WMIN',nanmin(self.helper.wmin,axis=1))
elif(self.geom=='yz'):
for var in self.dervars:
self.try_init_der_var(var,('time','y'))
self.put_var('WMAX',nanmax(self.helper.wmax,axis=2))
self.put_var('WMIN',nanmin(self.helper.wmin,axis=2))
self.put_var('VWP',mean_1d(self.helper.vwp,self.geom))
self.put_var('CWP',mean_1d(self.helper.cwp,self.geom))
self.put_var('RWP',mean_1d(self.helper.rwp,self.geom))
self.put_var('GWP',mean_1d(self.helper.gwp,self.geom))
self.put_var('SWP',mean_1d(self.helper.swp,self.geom))
self.put_var('IWP',mean_1d(self.helper.iwp,self.geom))
self.put_var('SSHF',mean_1d(self.helper.sshf,self.geom))
self.put_var('SLHF',mean_1d(self.helper.slhf,self.geom))
self.put_var('CLDQCIFRAC',mean_1d(self.helper.cldqcifrac,self.geom))
self.put_var('CLDQCIW1FRAC',mean_1d(self.helper.cldqciw1frac,self.geom))
self.put_var('LLWIND',mean_1d(self.helper.llwind,self.geom))
class statgroup_hovheight(statgroup_hov):
def __init__(self,geom):
super(statgroup_hovheight,self).__init__(geom,'hovz.'+marker+'.nc')
self.dirvars=dirhovvarsheight
self.dervars=derhovvarsheight
self.dervarsunits=derhovvarsunitsheight
self.helper=heighthelper
def app_dervars(self):
if(self.geom=='xz'):
for var in self.dervars:
self.try_init_der_var(var,('time','x'))
self.put_var('BUOYMAX',nanmax(self.helper.buoymax,axis=1))
self.put_var('BUOYMIN',nanmin(self.helper.buoymin,axis=1))
elif(self.geom=='yz'):
for var in self.dervars:
self.try_init_der_var(var,('time','y'))
self.put_var('BUOYMAX',nanmax(self.helper.buoymax,axis=2))
self.put_var('BUOYMIN',nanmin(self.helper.buoymin,axis=2))
self.put_var('POSBUOYPATH',mean_1d(self.helper.posbuoypath,self.geom))
self.put_var('NEGBUOYPATH',mean_1d(self.helper.negbuoypath,self.geom))
self.put_var('BUOYPATH',mean_1d(self.helper.posbuoypath+self.helper.negbuoypath,self.geom))
self.put_var('POSWVAPANOMPATH',mean_1d(self.helper.poswvapanompath,self.geom))
self.put_var('NEGWVAPANOMPATH',mean_1d(self.helper.negwvapanompath,self.geom))
self.put_var('WVAPANOMPATH',mean_1d(self.helper.poswvapanompath+self.helper.negwvapanompath,self.geom))
# general class for statistics based on 3d output
# this class is somewhat more extensive
# than the previous examples
class statgroup(ncobject):
def __init__(self,geom,outfile):
super(statgroup,self).__init__(outfile)
self.geom=geom
self.init_specmasks(['upstream','top','downstream','upstreambox','topbox','downstreambox'])
def app_dirvars(self):
for var in self.dirvars:
if var in self.varkeys:
self.make_var(var)
if(self.sampvars[var]==True):
for mask in self.masks.keys():
self.make_var(var,mask=mask)
self.put_mean_int_mask(var,self.gv(var))
def app_dervars(self): #different for level and height output
pass
def set_dims(self):
self.init_vdims()
self.init_hdims()
def init_vdims(self):
pass
def init_hdims(self):
if(self.geom=='xz'):
self.init_dimx()
elif(self.geom=='yz'):
self.init_dimy()
def mask_fracs(self):
for mask in self.masks.keys():
self.maskfrac[mask]=mean_1d(self.masks[mask].field,self.geom)
self.make_dervar(str(mask)+'frac',vtype='maskfrac')
self.put_mean_int(str(mask)+'frac',1.0*self.masks[mask].field)
def specmask_fracs(self):
for mask in self.specmasks.keys():
self.specmaskfrac[mask]=self.specmasks[mask].field
self.make_dervar(str(mask)+'frac',vtype='maskfrac')
if(self.geom=='xz'):
self.put_mean_int(str(mask)+'frac',1.0*self.specmasks[mask].field[:,:,None])
elif(self.geom=='yz'):
self.put_mean_int(str(mask)+'frac',1.0*self.specmasks[mask].field[:,None,:])
def mask_fracs_2d(self):
for mask in self.masks.keys():
self.maskfrac2d[mask]=mean2d(self.masks[mask].field)
def init_masks(self,masks):
self.masks={}
for maskname in masks:
self.masks[maskname]=mask()
self.maskfrac={}
self.maskfrac2d={}
def init_specmasks(self,masks):
self.specmasks={}
for maskname in masks:
self.specmasks[maskname]=specmask()
self.specmaskfrac={}
def calc_specmask(self):
# define areas at the top, upstream, and downstream
top = nanmax(hlower)
bot = nanmin(hlower)
tres1=bot+0.5*(top-bot)
tres2=bot+0.01*(top-bot)
tres3=bot+0.0001*(top-bot)
if(self.geom=='xz'):
toploc = (hlower[0,:]).argmax()
comp=hlower[0,:]
locs=(xrange(len(hlower[0,:]))<toploc)
elif(self.geom=='yz'):
toploc = (hlower[:,0]).argmax()
comp=hlower[:,0]
locs=(xrange(len(hlower[:,0]))<toploc)
else:
return
upstream=(comp>tres3)*(comp<=tres2)*locs
downstream=(comp>tres3)*(comp<=tres2)*(locs==False)
top=(comp>tres1)
self.specmasks['upstream'].setfield(upstream[None,:]*(self.hhsmall[:,:]>comp[None,:])*(self.hhsmall[:,:]<(comp[None,:]+500.)))
self.specmasks['top'].setfield(top[None,:]*(self.hhsmall[:,:]>comp[None,:])*(self.hhsmall[:,:]<(comp[None,:]+500.)))
self.specmasks['downstream'].setfield(downstream[None,:]*(self.hhsmall[:,:]>comp[None,:])*(self.hhsmall[:,:]<(comp[None,:]+500.)))
self.specmasks['upstreambox'].setfield(upstream[None,:]*(self.hhsmall[:,:]>comp[None,:])*(self.hhsmall[:,:]<(comp[None,:]+2500.)))
self.specmasks['topbox'].setfield(top[None,:]*(self.hhsmall[:,:]>comp[None,:])*(self.hhsmall[:,:]<(comp[None,:]+2500.)))
self.specmasks['downstreambox'].setfield(downstream[None,:]*(self.hhsmall[:,:]>comp[None,:])*(self.hhsmall[:,:]<(comp[None,:]+2500.)))
def make_spec(self,var,field):
if(shape(hlower)[0]==1):
return
for mask in self.specmasks:
nsegmentsmax=sum(self.specmasks[mask].field[:,:,:]==True)
if(nsegmentsmax>0):
dataout=zeros((nsegmentsmax,shape(field)[2]))
prepare_spectra_xz(dataout,field,self.specmasks[mask].field[:,:,:],shape(field)[0],shape(field)[1],shape(field)[2],shape(field)[3],nsegmentsmax)
# calculate distance between grid points
dy=1000*((nanmax(self.gdim('rlat')))-nanmin(self.gdim('rlat')))/(0.00899289*(len(self.gdim('rlat'))-1))
(p,wavenr)=asp.spectrum_peri(dataout, Fs=1/dy, pad=False, smooth=False,rmzf=True,scale_by_freq=True)
if(self.spectra.initiated==False):
self.spectra.init_dim('wavenr',wavenr)
self.spectra.initiated=True
self.spectra.try_init_der_var(var,('time','wavenr'),mask=mask,vtype='spec')
self.spectra.put_var(var+mask,p)
# class specific to level statistics
# this includes means in 1d and 2d, cross-sections, conditionally sampled variables
# interpolated output to height levels (for convenience/quick data
# exploration)
class statgroup_level(statgroup):
# initiatize cross sections, interpolation, spectra
def __init__(self,geom):
super(statgroup_level,self).__init__(geom,geom+'lv.'+marker+'.nc')
self.dirvars=dirvarslevel
self.dervars=dervarslevel
self.dervarsunits=dervarsunitslevel
self.helper=levelhelper
self.spectra=statgroup_spectra(geom,'spectralv.'+marker+'.nc')
self.spectra.dervarsunits=dervarsunitslevel
self.interp1d=statgroup_heightprof('interp1d.'+marker+'.nc')
self.interp1d.dervarsunits=dervarsunitslevel
self.interp=statgroup_interp(self.geom,'interp.'+marker+'.nc')
self.interp.dervarsunits=dervarsunitslevel
self.cross=crossgroup_level(self.geom,'crosslv.'+marker+'.nc')
self.cross.dervarsunits=dervarsunitslevel
self.cross.interp=crossgroup_interp(self.geom,'crossinterp.'+marker+'.nc')
self.cross.interp.dervarsunits=dervarsunitslevel
self.init_masks(['cld','upd','cldupd','cldupdw1'])
self.make_sampvars(sampvars)
self.make_specvars(specvars)
def calc_masks(self):
# this is where the actual masks are calculated
self.masks['cld'].setfield(self.helper.cld)
self.masks['upd'].setfield((self.helper.wh>0.0))
self.masks['cldupd'].setfield((self.helper.wh>0.0)*self.helper.cld)
self.masks['cldupdw1'].setfield((self.helper.wh>1.0)*self.helper.cld)
self.mask_fracs()
self.specmask_fracs()
# Adding a time step (overload because of interp,cross and sepctra)
def app_tstep(self,data):
self.interp.opener(data)
self.interp1d.opener(data)
self.cross.opener(data)
self.cross.interp.opener(data)
self.spectra.opener(data)
if(self.tstep==0):
self.interp.init_dim('z',interparr)
self.interp1d.init_dim('z',interparr)
self.cross.interp.init_dim('z',interparr)
self.hsmall=mean_1d(self.helper.h,self.geom)
self.hhsmall=mean_1d(self.helper.hh,self.geom)
self.calc_specmask()
self.make_topo()
super(statgroup_level,self).app_tstep(data)
self.interp.closer()
self.interp1d.closer()
self.cross.closer()
self.cross.interp.closer()
self.spectra.closer()
def make_var(self,var,mask=''):
if(shape(self.gv(var))[1]==shape(self.gdim('level1'))[0] and mask==''):
zlev='levelf'
else:
zlev='levelh'
if(self.geom=='xz'):
self.try_init_dirvar(var,('time',zlev,'x'),mask=mask)
self.interp.try_init_dirvar(var,('time','z','x'),mask=mask)
if(mask==''):
self.cross.try_init_dirvar(var,('time',zlev,'x'),mask=mask)
self.cross.interp.try_init_dirvar(var,('time','z','x'),mask=mask)
elif(self.geom=='yz'):
self.try_init_dirvar(var,('time',zlev,'y'),mask=mask)
self.interp.try_init_dirvar(var,('time','z','y'),mask=mask)
if(mask==''):
self.cross.try_init_dirvar(var,('time',zlev,'y'),mask=mask)
self.cross.interp.try_init_dirvar(var,('time','z','y'),mask=mask)
self.interp1d.try_init_dirvar(var,('time','z'),mask=mask)
def make_dervar(self,var,mask='',zlev='levelh',vtype=''):
if(self.geom=='xz'):
self.try_init_der_var(var,('time',zlev,'x'),mask=mask,vtype=vtype)
self.interp.try_init_der_var(var,('time','z','x'),mask=mask,vtype=vtype)
if(mask=='' and vtype==''):
self.cross.try_init_der_var(var,('time',zlev,'x'),mask=mask,vtype=vtype)
self.cross.interp.try_init_der_var(var,('time','z','x'),mask=mask,vtype=vtype)
elif(self.geom=='yz'):
self.try_init_der_var(var,('time',zlev,'y'),mask=mask,vtype=vtype)
self.interp.try_init_der_var(var,('time','z','y'),mask=mask,vtype=vtype)
if(mask=='' and vtype==''):
self.cross.try_init_der_var(var,('time',zlev,'y'),mask=mask,vtype=vtype)
self.cross.interp.try_init_der_var(var,('time','z','y'),mask=mask,vtype=vtype)
self.interp1d.try_init_der_var(var,('time','z'),mask=mask,vtype=vtype)
def app_dervars(self):
# append the derived variables
for var in self.dervars:
self.make_dervar(var)
if(self.sampvars[var]==True):
for mask in self.masks.keys():
self.make_dervar(var,mask=mask)
dw=deviation_1d(self.gv('W'),self.geom)
dwplus=dw[:,1:,:,:]
dwmin=dw[:,:-1,:,:]
dwh=0.5*dwplus+dwmin
wvar=0.5*(dwplus*dwplus+dwmin*dwmin)
del dwplus,dwmin #decrease mem footprint explicitly
du=deviation_1d(self.gv('U'),self.geom)
dv=deviation_1d(self.gv('V'),self.geom)
if 'cosmo_tke' in marker:
tkeh=0.5*(self.gv('TKE')[:,1:,:,:]+self.gv('TKE')[:,:-1,:,:])
else:
tkeh=0.5*((2.0*self.gv('TKE')[:,1:,:,:])**0.5+(2.0*self.gv('TKE')[:,:-1,:,:])**0.5)
self.put_mean_int_mask('TKE',tkeh)
self.put_mean_int_mask('LVTTKE',tkeh+0.5*(du*du+dv*dv)+0.5*wvar) # Total TKE wrt 1D mean (hence LV) circulation (includes resolved and sgs)
self.put_mean_int_mask('LVW2RES',wvar) # vertical component of this, both with and without sgs
self.put_mean_int_mask('LVW2TOTLES',wvar+(2./3.)*tkeh)
del tkeh,wvar
iexnf=(self.gv('P')/pref)**(-rd/cpd) # inverse exner function
theta=self.gv('T')*iexnf
thl=theta-(rlv/cpd)*self.gv('QC')*iexnf #liquid water potential temperature
self.put_mean_int_mask('THETA',theta)
self.put_mean_int_mask('THL',thl)
dtheta=deviation_1d(theta,self.geom)
del theta,thl
self.put_mean_int_mask('QCTOT',self.gv('CLW_CON')*self.gv('CLC_CON')+self.gv('QC')) # qc including (convective!) sgs contribution
self.put_mean_int_mask('QT',self.helper.qt)
self.put_mean_int_mask('RHO',self.helper.rhoh)
self.put_mean_int_mask('THETARHO',self.helper.thetarhoh)
mthetarhoh=mean_1d(self.helper.thetarhoh,self.geom)
if self.geom=='xz':
buoy=grav*(self.helper.thetarhoh-mthetarhoh[:,:,None,:])/mthetarhoh[:,:,None,:]
else:
buoy=grav*(self.helper.thetarhoh-mthetarhoh[:,:,:,None])/mthetarhoh[:,:,:,None]
self.put_mean_int_mask('LVBUOY',buoy) #buoyancy wrt 1d circulation
self.put_mean_int_mask('LVBUOYWRES',self.helper.rhoh*dwh*buoy) #buoyancy-flux wrt 1d circulation
del buoy
dqt=deviation_1d(self.helper.qt,self.geom)
self.put_mean_int_mask('RHOW',self.helper.rhoh*self.helper.wh) # mass flux
self.put_mean_int_mask('HH',self.helper.hh)
self.put_mean_int_mask('LVRHOUWRES',self.helper.rhoh*dwh*du) # momentum fluxes
self.put_mean_int_mask('LVRHOVWRES',self.helper.rhoh*dwh*dv)
self.put_mean_int_mask('LVRHOTHETAWRES',self.helper.rhoh*dwh*dtheta)
self.put_mean_int_mask('LVRHOQTWRES',self.helper.rhoh*dwh*dqt)
self.put_mean_int_mask('CLC_RES',self.helper.cld) # resolved cloud cover
self.put_mean_int_mask('CLC_TOT',self.helper.cld+self.gv('CLC_CON')*(1.0-self.helper.cld)) # only counts sgs clouds when no grid scale clouds
# total advective tendencies
if lbud:
self.put_mean_int_mask('AQVT_ADVTOT',self.gv('AQVT_ZADV')+self.gv('AQVT_ADV'))
self.put_mean_int_mask('AQTT_ADVTOT',self.gv('AQVT_ZADV')+self.gv('AQVT_ADV')+self.gv('AQCIT_ZADV')+self.gv('AQCIT_ADV'))
del dtheta,dqt,du,dv,dwh
# calculate q_sat and RH
psat = b1 * exp( b2w*(self.gv('T')-b3)/(self.gv('T')-b4w) )
qsat = (rd/rv)*psat/(self.gv('P')-(1.0-rd/rv)*psat)
self.put_mean_int_mask('QSAT',qsat)
self.put_mean_int_mask('RH',self.helper.qt/qsat)
del psat,qsat
psati = b1 * exp( b2i*(self.gv('T')-b3)/(self.gv('T')-b4i) )
qsati = (rd/rv)*psati/(self.gv('P')-(1.0-rd/rv)*psati)
self.put_mean_int_mask('QSATI',qsati)
self.put_mean_int_mask('RHI',self.helper.qt/qsati)
del psati,qsati
if lbud:
for proc in proclist:
self.put_mean_int_mask('QTT_'+proc,self.gv('QVT_'+proc)+self.gv('QCIT_'+proc))
self.put_mean_int_mask('AQTT_'+proc,self.gv('AQVT_'+proc)+self.gv('AQCIT_'+proc))
# resolved mass-fluxes using mask criteria
for mask in self.masks.keys():
self.make_dervar(str(mask)+'mf',vtype='mf')
self.put_mean_int(str(mask)+'mf',self.masks[mask].field*self.helper.rhoh*self.helper.wh)
def make_topo(self):
lenxory=shape(self.hsmall)[2]
lent=shape(self.hsmall)[0]
lenout=len(interparr)
self.interphsmall=zeros((lent,lenout,lenxory))
self.interphhsmall=zeros((lent,lenout,lenxory))
for t in xrange(lent):
for i in xrange(lenxory):
self.interphhsmall[t,:,i]=1.0*(interparr[:]>self.hhsmall[t,-1,i])
for t in xrange(lent):
for i in xrange(lenxory):
self.interphsmall[t,:,i]=1.0*(interparr[:]>self.hsmall[t,-1,i])
self.interphsmall[self.interphsmall==0.0]=np.nan
self.interphhsmall[self.interphhsmall==0.0]=np.nan
# higher level put_var routines that do interpolation as well
def put_mean_int(self,var,field):
self.put_var_int(var,mean_1d(field,self.geom))
def put_var_int(self,var,field):
self.put_var(var,field)
lenxory=shape(field)[2]
lenz=shape(field)[1]
lent=shape(field)[0]
lenout=len(interparr)
varinterp=zeros((lent,lenout,lenxory))
if(lenz==shape(self.hhsmall)[1]):
int_to_height(varinterp,interparr,field,self.hhsmall,lent,lenz,lenxory,lenout)
self.interp.put_var(var,varinterp*self.interphhsmall)
self.interp1d.put_var(var,nansum(varinterp*self.interphhsmall,axis=2)/nansum(self.interphhsmall,axis=2))
else:
int_to_height(varinterp,interparr,field,self.hsmall,lent,lenz,lenxory,lenout)
self.interp.put_var(var,varinterp*self.interphsmall)
self.interp1d.put_var(var,nansum(varinterp*self.interphsmall,axis=2)/nansum(self.interphsmall,axis=2))
# higher level put_var routines that do interpolation and masks
def put_var_int_mask(self,var,field,mask):
self.put_var(var,field/self.maskfrac[mask])
lenxory=shape(field)[2]
lenz=shape(field)[1]
lent=shape(field)[0]
lenout=len(interparr)
fminterp=zeros((lent,lenout,lenxory))
mfinterp=zeros((lent,lenout,lenxory))
varinterp=zeros((lent,lenout,lenxory))
int_to_height(varinterp,interparr,field/self.maskfrac[mask],self.hhsmall,lent,lenz,lenxory,lenout)
int_to_height(fminterp,interparr,field,self.hhsmall,lent,lenz,lenxory,lenout)
int_to_height(mfinterp,interparr,self.maskfrac[mask],self.hhsmall,lent,lenz,lenxory,lenout)
self.interp.put_var(var,varinterp*self.interphhsmall)
self.interp1d.put_var(var,nansum(fminterp,axis=2)/nansum(mfinterp,axis=2))
def put_mean_int_mask(self,var,field):
self.put_var_int(var,mean_1d(field,self.geom))
self.crossput_mean_int(var,extract_1d(field,self.geom))
if(self.sampvars[var]==True):
for mask in self.masks.keys():
if(shape(field)[1]==shape(self.hhsmall)[1]):
self.put_var_int_mask(var+mask,mean_1d(field*self.masks[mask].field,self.geom),mask)
else:
f=0.5*(field[:,1:,:,:]+field[:,:-1,:,:])
self.put_var_int_mask(var+mask,mean_1d(f*self.masks[mask].field,self.geom),mask)
if(self.specvars[var]==True):
if(shape(field)[1]==shape(self.hhsmall)[1]):
self.make_spec(var,field)
else:
self.make_spec(var,field[:,1:,:,:])
# higher level put_var routines that do interpolation as well, for cross sections
# could probably be merged with put_mean_int routine
def crossput_mean_int(self,var,field):
self.cross.put_var(var,field)
lenxory=shape(field)[2]
lenz=shape(field)[1]
lent=shape(field)[0]
lenout=len(interparr)
varinterp=zeros((lent,lenout,lenxory))
if(lenz==shape(self.hhsmall)[1]):
int_to_height(varinterp,interparr,field,self.hhsmall,lent,lenz,lenxory,lenout)
self.cross.interp.put_var(var,varinterp*self.interphhsmall)
else:
int_to_height(varinterp,interparr,field,self.hsmall,lent,lenz,lenxory,lenout)
self.cross.interp.put_var(var,varinterp*self.interphsmall)
def init_vdims(self):
self.init_dim('levelf',self.gdim('level1'))
self.init_dim('levelh',self.gdim('level'))
# class specific to height statistics
class statgroup_height(statgroup):
def __init__(self,geom):
super(statgroup_height,self).__init__(geom,geom+'z.'+marker+'.nc')
self.dirvars=dirvarsheight
self.dervars=dervarsheight
self.dervarsunits=dervarsunitsheight
self.helper=heighthelper
self.spectra=statgroup_spectra(geom,'spectraz.'+marker+'.nc')
self.spectra.dervarsunits=dervarsunitsheight
self.v1d=statgroup_heightprof('prof1d.'+marker+'.nc')
self.v1d.dervarsunits=dervarsunitsheight
self.cross=crossgroup_height(self.geom,'crossz.'+marker+'.nc')
self.cross.dervarsunits=dervarsunitsheight
self.init_masks(['cld','cldcr','upd','cldupd','cldupdw1'])
self.make_sampvars(sampvars)
self.make_specvars(specvars)
def calc_masks(self):
w=self.gv('W')
# this is where the actual masks are calculated
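        # sampling criteria: cld = cloudy, cldcr = cloudy core (density below the slab mean, i.e. buoyant),
        # upd = updraft (w > 0), cldupd = cloudy updraft, cldupdw1 = cloudy updraft with w > 1 (presumably m/s)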
self.masks['cld'].setfield(self.helper.cld)
self.masks['cldcr'].setfield(self.helper.cld*(self.deviation_2d(self.helper.rhoh)<0.0))
self.masks['upd'].setfield((w>0.0))
self.masks['cldupd'].setfield(self.helper.cld*(w>0.0))
self.masks['cldupdw1'].setfield(self.helper.cld*(w>1.0))
self.mask_fracs()
self.specmask_fracs()
self.mask_fracs_2d()
def app_tstep(self,data):
self.v1d.opener(data)
self.cross.opener(data)
self.spectra.opener(data)
if(self.tstep==0):
self.v1d.init_dimz()
self.hhsmall=self.helper.hhsmall
self.calc_specmask()
super(statgroup_height,self).app_tstep(data)
self.v1d.closer()
self.cross.closer()
self.spectra.closer()
def make_var(self,var,mask=''):
if(self.geom=='xz'):
self.try_init_dirvar(var,('time','z','x'),mask=mask)
if(mask==''):
self.cross.try_init_dirvar(var,('time','z','x'),mask=mask)
elif(self.geom=='yz'):
self.try_init_dirvar(var,('time','z','y'),mask=mask)
if(mask==''):
self.cross.try_init_dirvar(var,('time','z','y'),mask=mask)
self.v1d.try_init_dirvar(var,('time','z'),mask=mask)
def make_dervar(self,var,mask='',vtype=''):
if(self.geom=='xz'):
self.try_init_der_var(var,('time','z','x'),mask=mask,vtype=vtype)
if(mask=='' and vtype==''):
self.cross.try_init_der_var(var,('time','z','x'),mask=mask,vtype=vtype)
elif(self.geom=='yz'):
self.try_init_der_var(var,('time','z','y'),mask=mask,vtype=vtype)
if(mask=='' and vtype==''):
self.cross.try_init_der_var(var,('time','z','y'),mask=mask,vtype=vtype)
self.v1d.try_init_der_var(var,('time','z'),mask=mask,vtype=vtype)
def app_dervars(self):
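        # register the derived variables (per mask for sampled ones), then compute 2-D deviations,
        # (liquid-water) potential temperature and total TKE, and write out means, variances and fluxes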
for var in self.dervars:
self.make_dervar(var)
if(self.sampvars[var]==True):
for mask in self.masks.keys():
self.make_dervar(var,mask=mask)
w=self.gv('W')
du=self.deviation_2d(self.gv('U'))
dv=self.deviation_2d(self.gv('V'))
dw=self.deviation_2d(w)
dqt=self.deviation_2d(self.helper.qt)
iexnf=(self.gv('P')/pref)**(-rd/cpd) #inverse exner function
theta=self.gv('T')*iexnf
thl=theta-(rlv/cpd)*self.gv('QC')*iexnf
dthl=self.deviation_2d(thl)
tke=self.gv('TKE')
ttke=tke+0.5*(du*du+dv*dv+dw*dw) # includes mean circulation
self.put_mean_int_mask('BUOY',self.helper.buoy)
self.put_mean_int_mask('TKE',tke)
self.put_mean_int_mask('TTKE',ttke)
self.put_mean_int_mask('RHO',self.helper.rhoh)
# variances, * preferred over **2 for computational reasons
self.put_mean_int_mask('QTP2',dqt*dqt)
self.put_mean_int_mask('THLP2',dthl*dthl)
self.put_mean_int_mask('BUOYP2',self.helper.buoy*self.helper.buoy)
self.put_mean_int_mask('QTP',dqt)
self.put_mean_int_mask('RHOWBUOY',self.helper.rhoh*w*self.helper.buoy)
self.put_mean_int_mask('W2',w*w)
for mask in self.masks.keys():
self.make_dervar(str(mask)+'mf',vtype='mf')
self.put_mean_int(str(mask)+'mf',self.masks[mask].field*self.helper.rhoh*self.gv('W'))
def deviation_2d(self,field):
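        # deviation from the topography-masked slab mean; the geometry decides along which horizontal axis
        # the mean is taken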
if(self.geom=='xz'):
mfield=mean2d(field*self.helper.topomask[None,:,None,:])[:,:,None,None]/mean2d(self.helper.topomask[None,:,None,:])[:,:,None,None]
pfield=(field*self.helper.topomask[None,:,None,:]-mfield)
elif(self.geom=='yz'):
            mfield=mean2d(field*self.helper.topomask[None,:,:,None])[:,:,None,None]/mean2d(self.helper.topomask[None,:,:,None])[:,:,None,None]
pfield=(field*self.helper.topomask[None,:,:,None]-mfield)
return pfield
def put_mean_int(self,var,field):
if(self.geom=='xz'):
self.put_var(var,mean_1d(field*self.helper.topomask[None,:,None,:],self.geom)*self.helper.topomasknan[None,:,:])
self.v1d.put_var(var,mean2d(field*self.helper.topomask[None,:,None,:]))
elif(self.geom=='yz'):
self.put_var(var,mean_1d(field*self.helper.topomask[None,:,:,None],self.geom)*self.helper.topomasknan[None,:,:])
self.v1d.put_var(var,mean2d(field*self.helper.topomask[None,:,:,None]))
def put_mean_int_mask(self,var,field):
self.put_var(var,mean_1d(field,self.geom)*self.helper.topomasknan)
self.cross.put_var(var,extract_1d(field,self.geom)*self.helper.topomasknan)
self.v1d.put_var(var,mean2d(field))
if(self.sampvars[var]==True):
for mask in self.masks.keys():
self.put_var(var+mask,mean_1d(field*self.masks[mask].field,self.geom)/self.maskfrac[mask])
self.v1d.put_var(var+mask,mean2d(field*self.masks[mask].field)/self.maskfrac2d[mask])
if(self.specvars[var]==True):
if(shape(field)[1]==shape(self.hhsmall)[1]):
self.make_spec(var,field)
else:
self.make_spec(var,field[:,1:,:,:])
def init_vdims(self):
self.init_dimz()
# general class for 2d profiles,cross sections, interpolated profiles
# actual processing is part of the statgroup routines
# (for computational reasons)
class statgroup_heightprof(ncobject):
pass
class statgroup_interp(statgroup):
pass
class crossgroup_level(statgroup):
def init_vdims(self):
self.init_dim('levelf',self.gdim('level1'))
self.init_dim('levelh',self.gdim('level'))
class crossgroup_height(statgroup):
def init_vdims(self):
self.init_dimz()
class crossgroup_interp(statgroup):
pass
def detect_geometry():
# detect if the geometry of the case is 2-dimensional in any of the directions
# using HHL[0] of the first .nc file of the levels type
# hlower will also be used to do the topographic filtering later on
# also determine how interpolation is done
global geom,hlower,interparr
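    # variance threshold below which the surface height is treated as uniform along a direction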
thresh=0.01
print filelist['level'][0]
try:
hlower=var_from_file(Dataset(filelist['level'][0],'r'),'HHL')[0,-1,:,:]
except:
print 'could not detect geometry'
raise
if(var(hlower[:,0])<thresh):
geom='xz'
elif(var(hlower[0,:])<thresh):
geom='yz'
else:
geom='2d'
print('the geometry of this experiment is detected as '+geom)
# special case for 2d experiments
if(shape(hlower)[0]==1):
geom='xz'
# derive heights where interpolation needed
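    # the target spacing gets coarser for deeper domains (100 m above 12 km, 50 m above 6 km, 20 m above 2 km, 10 m otherwise)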
hmax=nanmax(var_from_file(Dataset(filelist['level'][0],'r'),'HHL'))
# directly take input heights if no interpolation needed
if(var(hlower[:,:])<thresh):
interparr=var_from_file(Dataset(filelist['level'][0],'r'),'HHL')[0,::-1,0,0]
elif(hmax>12000.):
interparr=arange(0.,hmax,100.)
elif(hmax>6000.):
interparr=arange(0.,hmax,50.)
elif(hmax>2000.):
interparr=arange(0.,hmax,20.)
else:
interparr=arange(0.,hmax,10.)
# process level based data
def process_levelbased():
global levelhelper
levelhelper=nclevelhelper(geom)
levelout=statgroup_level(geom)
intlevelout=statgroupintlevel()
domlevelout=statgroup_domlevel()
hovlevelout=statgroup_hovlevel(geom)
if(geom == '2d'):
print('no level-statistics are created')
for file_to_process in filelist['level']:
cosmodata=Dataset(file_to_process,'r',format='NETCDF4')
levelhelper.update(cosmodata)
if(geom != '2d'):
levelout.app_tstep(cosmodata)
intlevelout.app_tstep(cosmodata)
domlevelout.app_tstep(cosmodata)
hovlevelout.app_tstep(cosmodata)
print clock()-start
# process height based data
def process_heightbased():
global heighthelper
heighthelper=ncheighthelper(geom)
heightout=statgroup_height(geom)
intheightout=statgroupintheight()
domheightout=statgroup_domheight()
hovheightout=statgroup_hovheight(geom)
for file_to_process in filelist['height']:
cosmodata=Dataset(file_to_process,'r',format='NETCDF4')
heighthelper.update(cosmodata)
heightout.app_tstep(cosmodata)
intheightout.app_tstep(cosmodata)
domheightout.app_tstep(cosmodata)
hovheightout.app_tstep(cosmodata)
print clock()-start
# replace missing values for reading in ncview
# and copy to project (storage) directory
def copy_files_to_project():
outfiles=glob.glob(outdir+'*'+marker+'*.nc')
outfilescloud=glob.glob(outdir+'clouds/*.'+marker+'.nc')
for outfile in outfiles+outfilescloud:
print outfile
outfiledata=Dataset(outfile,'r+',format='NETCDF4')
for var in outfiledata.variables:
if(type(outfiledata.variables[(var)])==np.ma.masked_array):
vardata=outfiledata.variables[(var)][:]
vardata=vardata.filled(nan)
                whereinf=isinf(vardata)
                vardata[whereinf]=nan
                outfiledata.variables[(var)][:]=vardata # presumed intent: write the cleaned values back to the file
outfiledata.close()
print clock()-start
for outfile in outfiles:
shutil.copy(outfile,projectdir)
make_tarfile(outdir+'clouds.'+marker+'.tar',glob.glob(outdir+'clouds/*.'+marker+'.nc'))
shutil.copy(outdir+'clouds.'+marker+'.tar',projectdir)
# update the variables to post-process by level type
def update_variables():
global dirvarslevel,dirvarsheight
global dervarslevel,dervarsheight
global dirintvarslevel,dirintvarsheight
global derintvarslevel,derintvarsheight
global dirhovvarslevel,dirhovvarsheight
global derhovvarslevel,derhovvarsheight
global derdomvarslevel,derdomvarsheight
global derintvarsunitslevel,derintvarsunitsheight
global dervarsunitslevel
global sampvars
#add budget variables
if lbud:
for proc in proclist:
dirvarslevel+=['TT_'+proc,'ATT_'+proc,'QVT_'+proc,'AQVT_'+proc,'QCIT_'+proc,'AQCIT_'+proc,]
dervarsunitslevel.update({'AQTT_'+proc:u'kg kg-1 s-1','QTT_'+proc:u'kg kg-1 s-1',})
sampvars+=[
'AQTT_MIC',
'AQTT_TURB',
'AQTT_ADVTOT',
'AQTT_HD'
]
dervarsunitslevel.update({'AQVT_ADVTOT':u'kg kg-1 s-1','AQTT_ADVTOT':u'kg kg-1 s-1'})
#direct variables
dirvarsheight=dirvarslevel
# add domain mean variables to hovmoeller variables
dirhovvarslevel+=dirdomvarslevel
dirhovvarsheight+=dirdomvarsheight
derhovvarsunitslevel.update(derdomvarsunitslevel)
derhovvarsunitsheight.update(derdomvarsunitsheight)
# add hovmoeller variables to xy (int) variables
dirintvarslevel+=dirhovvarslevel
dirintvarsheight+=dirhovvarsheight
derintvarsunitslevel.update(derhovvarsunitslevel)
derintvarsunitsheight.update(derhovvarsunitsheight)
    # make list of derived variables separated from keys
dervarsheight=dervarsunitsheight.keys()
dervarslevel=dervarsunitslevel.keys()
derintvarslevel=derintvarsunitslevel.keys()
derintvarsheight=derintvarsunitsheight.keys()
derdomvarslevel=derdomvarsunitslevel.keys()
derdomvarsheight=derdomvarsunitsheight.keys()
derhovvarslevel=derhovvarsunitslevel.keys()
derhovvarsheight=derhovvarsunitsheight.keys()
# general class for the cross sections
# take into account areas below topography as nans
# use just one variable per file!
class xycross_onevar(ncobject):
def __init__(self,var,outfile):
super(xycross_onevar,self).__init__(outfile)
self.dirvars=[var]
def make_topo(self):
alt=self.gdim('altitude')
lalt=len(alt)
self.topomask=zeros((lalt,shape(hlower)[0],shape(hlower)[1]),int)
self.topomasknan=zeros((lalt,shape(hlower)[0],shape(hlower)[1]),float)
for k in xrange(lalt):
for j in xrange(shape(hlower)[0]):
for i in xrange(shape(hlower)[1]):
self.topomask[k,j,i]=1*(alt[k]>hlower[j,i])
self.topomasknan=zeros(shape(self.topomask))
self.topomasknan[:]=self.topomask[:]
self.topomasknan[self.topomask==0]=np.nan
def app_dirvars(self):
if(self.tstep==0):
self.make_topo()
for var in self.dirvars:
if var in self.varkeys:
self.make_var(var)
self.put_var(var,self.gv(var)*self.topomasknan)
def set_dims(self):
self.init_vdims()
self.init_hdims()
def init_vdims(self):
self.init_dimz()
def init_hdims(self):
self.init_dimx()
self.init_dimy()
def make_var(self,var):
self.try_init_dirvar(var,('time','z','y','x'))
def compress_crossxy():
vardata=Dataset(filelist['crossxy'][0])
varkeys=vardata.variables.keys()
compressed_files={}
for var in varkeys:
if len(shape(vardata.variables[var]))==4:
compressed_files[var]=xycross_onevar(var,'crossxy.'+var+'.'+marker+'.nc')
for file_to_process in filelist['crossxy']:
cosmodata=Dataset(file_to_process,'r',format='NETCDF4')
for var in varkeys:
if len(shape(vardata.variables[var]))==4:
compressed_files[var].app_tstep(cosmodata)
# general class for compressed files containing satellite output
# here we save with reduced precision and compress a lot
class satvars_compressed(ncobject):
def __init__(self,varlist,outfile):
super(satvars_compressed,self).__init__(outfile)
self.dirvars=varlist
def app_dirvars(self):
for var in self.dirvars:
if var in self.varkeys:
self.make_var(var)
self.put_var(var,self.gv(var))
def try_init_dirvar(self,var,dims):
if(self.tstep==0):
so=self.outfile.createVariable(var,'f4', dims,zlib=lzlib,least_significant_digit=6)
so.missing_value = nan
so.long_name=self.data.variables[var].long_name
so.units=self.gu(var)
def set_dims(self):
self.init_vdims()
self.init_hdims()
def init_vdims(self):
self.init_dimchannels()
def init_hdims(self):
self.init_dimx()
self.init_dimy()
def make_var(self,var):
self.try_init_dirvar(var,('time','nsynmsg','y','x'))
def compress_sats():
if len(filelist['sats'])==0:
return
vardata=Dataset(filelist['sats'][0])
varkeys=vardata.variables.keys()
vars_to_process=[]
for var in varkeys:
if len(shape(vardata.variables[var]))==4:
vars_to_process.append(var)
satout=satvars_compressed(vars_to_process,'sats.'+marker+'.nc')
for file_to_process in filelist['sats']:
cosmodata=Dataset(file_to_process,'r',format='NETCDF4')
satout.app_tstep(cosmodata)
# general class for compressed files containing cloud stats
# here we save with reduced precision and compress a lot
class cloudvars_compressed(ncobject):
def __init__(self,varlist,outfile):
super(cloudvars_compressed,self).__init__(outfile)
self.dirvars=varlist
def app_dirvars(self):
for var in self.dirvars:
if var in self.varkeys:
self.make_var(var)
self.put_var(var,self.gv(var))
self.add_hsurf()
def try_init_dirvar(self,var,dims):
if(self.tstep==0):
so=self.outfile.createVariable(var,'f4', dims,zlib=lzlib,least_significant_digit=6)
so.missing_value = nan
so.long_name=self.data.variables[var].long_name
so.units=self.gu(var)
def set_dims(self):
self.init_vdims()
self.init_hdims()
def init_vdims(self):
self.init_dimz()
def init_hdims(self):
self.init_dimx()
self.init_dimy()
def make_var(self,var):
self.try_init_dirvar(var,('time','z','y','x'))
def add_hsurf(self):
self.try_init_dirvar('HSURF',('time','y','x'))
self.put_var('HSURF',self.gv_2d('HSURF'))
def compress_clouds():
vardata=Dataset(filelist['crossxy'][0])
varkeys=vardata.variables.keys()
vars_to_process=[]
for var in varkeys:
        if len(shape(vardata.variables[var]))==4 and (var!='QV'): # exclude QV explicitly; the 4-D check already drops HSURF and the dimension variables
vars_to_process.append(var)
for file_to_process in filelist['cloud']:
cloudout=cloudvars_compressed(vars_to_process,'clouds/cloud.'+file_to_process[-13:-5]+'.'+marker+'.nc')
cosmodata=Dataset(file_to_process,'r',format='NETCDF4')
cloudout.app_tstep(cosmodata)
########### MAIN PROGRAM ###########
def runme():
update_variables()
mkdir_p(outdir)
mkdir_p(outdir+'/clouds')
mkdir_p(projectdir)
make_filelist()
detect_geometry()
process_levelbased()
process_heightbased()
if lcross:
compress_crossxy()
if lclouds:
compress_clouds()
if lsats:
compress_sats()
copy_files_to_project()
# ACTUALLY CALLS THE SCRIPT FROM THE COMMAND LINE
# Using: if __name__ == "__main__"
# makes sure we can import the separate routines
if __name__ == "__main__":
case=sys.argv[1]
conf=sys.argv[2]
exper=sys.argv[3]
fulldir='/scratch/daint/'+myusername+'/exe/csim/fromtempl/'+case+'/'+conf+'/'+exper+'/output/'
outdir='/scratch/daint/'+myusername+'/statdump/'+case+'/'+exper+'/'
projectdir='/project/ch4/'+myusername+'/statdump/'+case+'/'+exper+'/'
marker=conf
runme()
| lgpl-3.0 |
dsullivan7/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
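    # decision boundary of classifier c: coef[c, 0]*x0 + coef[c, 1]*x1 + intercept[c] = 0, solved for x1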
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
gimli-org/gimli | doc/examples/dev/multi/fluidFlow.py | 1 | 2829 | #!/usr/bin/env python
"""
Fluid flow stuff .. calc
- static velocity field
- calc nonsteady advection/diffusion
"""
import matplotlib.pyplot as plt
import numpy as np
import pygimli as pg
from pygimli import physics
from pygimli.solver import parseArgToArray
from pygimli.solver import *
def createCacheName(base, mesh, timeSteps=[]):
return 'cache-' + base + "-" + str(mesh.nodeCount()) + '-' + str(len(timeSteps))
def hydraulicConductivity(perm, visc=1.0, dens=1.0,
mesh=None, meshI=None):
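    # hydraulic conductivity K = permeability * density / viscosity * g, optionally interpolated onto a
    # second mesh with cells outside the source mesh filled afterwards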
perm = parseArgToArray(perm, mesh.cellCount(), mesh)
visc = parseArgToArray(visc, mesh.cellCount(), mesh)
dens = parseArgToArray(dens, mesh.cellCount(), mesh)
k = perm * dens/visc * pg.physics.constants.g
if meshI:
k = pg.interpolate(mesh, k, meshI.cellCenters())
k = pg.solver.fillEmptyToCellArray(meshI, k)
return k
def calcStokesVelocity(mesh, visc, velBoundary, preBoundary):
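    # steady-state Stokes velocity field, cached on disk and recomputed only when the cache file is missing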
solutionName = createCacheName('vel', mesh)
try:
#vel = pg.load(solutionName + '.bmat')
vel = np.load(solutionName + '.bmat.npy')
except Exception as e:
print(e)
print("Building .... ")
class WS:
pass
ws = WS
vel, pres, pCNorm, divVNorm = solveStokes_NEEDNAME(mesh, velBoundary,
preBoundary,
viscosity=visc,
maxIter=1000,
tol=1e-4, pRelax=0.1,
verbose=1, ws=ws)
np.save(solutionName + '.bmat', vel)
return vel
def calcConcentration(mesh, vel, times, injectPos, peclet=50, scale=1):
r"""
.. math::
"""
solutionName = createCacheName('conc', mesh, times)
try:
conc = np.load(solutionName + '.bmat.npy')
except Exception as e:
print(e)
print("Building .... ")
f = pg.Vector(mesh.cellCount(), 0.0)
sourceCell=mesh.findCell(injectPos)
f[sourceCell.id()] = scale
print(sourceCell.size())
uMesh1 = solveFiniteVolume(mesh, a=1./peclet, f=f, vel=vel, times=times,
uBoundary=[2, 0],
scheme='PS', verbose=10)
uMesh2 = solveFiniteVolume(mesh, a=1./peclet, f=0, vel=vel, times=times,
uBoundary=[2, 0], u0=uMesh1[-1],
scheme='PS', verbose=10)
conc = np.vstack((uMesh1, uMesh2[1:]))
np.save(solutionName + '.bmat', conc)
return conc
if __name__ == "__main__":
pass | apache-2.0 |
mlindauer/AutoFolio | autofolio/selector/pairwise_classification.py | 1 | 4551 | import logging
import traceback
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from ConfigSpace.hyperparameters import CategoricalHyperparameter, \
UniformFloatHyperparameter, UniformIntegerHyperparameter
from ConfigSpace.conditions import EqualsCondition, InCondition
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace import Configuration
from aslib_scenario.aslib_scenario import ASlibScenario
__author__ = "Marius Lindauer"
__license__ = "BSD"
class PairwiseClassifier(object):
@staticmethod
def add_params(cs: ConfigurationSpace):
'''
adds parameters to ConfigurationSpace
'''
selector = cs.get_hyperparameter("selector")
classifier = cs.get_hyperparameter("classifier")
if "PairwiseClassifier" in selector.choices:
cond = InCondition(child=classifier, parent=selector, values=["PairwiseClassifier"])
cs.add_condition(cond)
def __init__(self, classifier_class):
'''
Constructor
'''
self.classifiers = []
self.logger = logging.getLogger("PairwiseClassifier")
self.classifier_class = classifier_class
self.normalizer = MinMaxScaler()
def fit(self, scenario: ASlibScenario, config: Configuration):
'''
        fit pairwise classifiers on ASlib scenario data
Arguments
---------
scenario: data.aslib_scenario.ASlibScenario
ASlib Scenario with all data in pandas
config: ConfigSpace.Configuration
configuration
'''
self.logger.info("Fit PairwiseClassifier with %s" %
(self.classifier_class))
self.algorithms = scenario.algorithms
from sklearn.utils import check_array
from sklearn.tree._tree import DTYPE
n_algos = len(scenario.algorithms)
X = scenario.feature_data.values
# since sklearn (at least the RFs)
# uses float32 and we pass float64,
# the normalization ensures that floats
# are not converted to inf or -inf
#X = (X - np.min(X)) / (np.max(X) - np.min(X))
X = self.normalizer.fit_transform(X)
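        # one binary classifier per ordered algorithm pair, trained to predict whether algorithm i has a
        # lower performance value than algorithm j, weighted by the absolute performance gap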
for i in range(n_algos):
for j in range(i + 1, n_algos):
y_i = scenario.performance_data[scenario.algorithms[i]].values
y_j = scenario.performance_data[scenario.algorithms[j]].values
y = y_i < y_j
weights = np.abs(y_i - y_j)
clf = self.classifier_class()
clf.fit(X, y, config, weights)
self.classifiers.append(clf)
def predict(self, scenario: ASlibScenario):
'''
predict schedules for all instances in ASLib scenario data
Arguments
---------
scenario: data.aslib_scenario.ASlibScenario
ASlib Scenario with all data in pandas
Returns
-------
schedule: {inst -> (solver, time)}
schedule of solvers with a running time budget
'''
if scenario.algorithm_cutoff_time:
cutoff = scenario.algorithm_cutoff_time
else:
cutoff = 2**31
n_algos = len(scenario.algorithms)
X = scenario.feature_data.values
X = self.normalizer.transform(X)
scores = np.zeros((X.shape[0], n_algos))
clf_indx = 0
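        # voting scheme: each pairwise classifier votes for its predicted winner and the algorithm with
        # the most votes is scheduled for the instance, with the cutoff time as budget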
for i in range(n_algos):
for j in range(i + 1, n_algos):
clf = self.classifiers[clf_indx]
Y = clf.predict(X)
scores[Y == 1, i] += 1
scores[Y == 0, j] += 1
clf_indx += 1
#self.logger.debug(
# sorted(list(zip(scenario.algorithms, scores)), key=lambda x: x[1], reverse=True))
algo_indx = np.argmax(scores, axis=1)
schedules = dict((str(inst),[s]) for s,inst in zip([(scenario.algorithms[i], cutoff+1) for i in algo_indx], scenario.feature_data.index))
#self.logger.debug(schedules)
return schedules
def get_attributes(self):
'''
returns a list of tuples of (attribute,value)
for all learned attributes
Returns
-------
list of tuples of (attribute,value)
'''
class_attr = self.classifiers[0].get_attributes()
attr = [{self.classifier_class.__name__:class_attr}]
return attr | bsd-2-clause |
CVML/scikit-learn | examples/applications/face_recognition.py | 191 | 5513 | """
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
Expected results for the top 5 most represented people in the dataset::
precision recall f1-score support
Ariel Sharon 0.67 0.92 0.77 13
Colin Powell 0.75 0.78 0.76 60
Donald Rumsfeld 0.78 0.67 0.72 27
George W Bush 0.86 0.86 0.86 146
Gerhard Schroeder 0.76 0.76 0.76 25
Hugo Chavez 0.67 0.67 0.67 15
Tony Blair 0.81 0.69 0.75 36
avg / total 0.80 0.80 0.80 322
"""
from __future__ import print_function
from time import time
import logging
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_lfw_people
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the flattened pixel data directly (relative pixel
# position information is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
###############################################################################
# Split into a training set and a test set using a stratified k fold
# split into a training and testing set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print("Extracting the top %d eigenfaces from %d faces"
% (n_components, X_train.shape[0]))
t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
print("done in %0.3fs" % (time() - t0))
eigenfaces = pca.components_.reshape((n_components, h, w))
print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
###############################################################################
# Train a SVM classification model
print("Fitting the classifier to the training set")
t0 = time()
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
###############################################################################
# Quantitative evaluation of the model quality on the test set
print("Predicting people's names on the test set")
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
plt.subplot(n_row, n_col, i + 1)
plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)
plt.title(titles[i], size=12)
plt.xticks(())
plt.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significative eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
plt.show()
| bsd-3-clause |
HyukjinKwon/spark | python/pyspark/pandas/categorical.py | 15 | 5290 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import TYPE_CHECKING
import pandas as pd
from pandas.api.types import CategoricalDtype
if TYPE_CHECKING:
import pyspark.pandas as ps # noqa: F401 (SPARK-34943)
class CategoricalAccessor(object):
"""
Accessor object for categorical properties of the Series values.
Examples
--------
>>> s = ps.Series(list("abbccc"), dtype="category")
>>> s # doctest: +SKIP
0 a
1 b
2 b
3 c
4 c
5 c
dtype: category
Categories (3, object): ['a', 'b', 'c']
>>> s.cat.categories
Index(['a', 'b', 'c'], dtype='object')
>>> s.cat.codes
0 0
1 1
2 1
3 2
4 2
5 2
dtype: int8
"""
def __init__(self, series: "ps.Series"):
if not isinstance(series.dtype, CategoricalDtype):
raise ValueError("Cannot call CategoricalAccessor on type {}".format(series.dtype))
self._data = series
@property
def categories(self) -> pd.Index:
"""
The categories of this categorical.
Examples
--------
>>> s = ps.Series(list("abbccc"), dtype="category")
>>> s # doctest: +SKIP
0 a
1 b
2 b
3 c
4 c
5 c
dtype: category
Categories (3, object): ['a', 'b', 'c']
>>> s.cat.categories
Index(['a', 'b', 'c'], dtype='object')
"""
return self._data.dtype.categories
@categories.setter
def categories(self, categories: pd.Index) -> None:
raise NotImplementedError()
@property
def ordered(self) -> bool:
"""
Whether the categories have an ordered relationship.
Examples
--------
>>> s = ps.Series(list("abbccc"), dtype="category")
>>> s # doctest: +SKIP
0 a
1 b
2 b
3 c
4 c
5 c
dtype: category
Categories (3, object): ['a', 'b', 'c']
>>> s.cat.ordered
False
"""
return self._data.dtype.ordered
@property
def codes(self) -> "ps.Series":
"""
Return Series of codes as well as the index.
Examples
--------
>>> s = ps.Series(list("abbccc"), dtype="category")
>>> s # doctest: +SKIP
0 a
1 b
2 b
3 c
4 c
5 c
dtype: category
Categories (3, object): ['a', 'b', 'c']
>>> s.cat.codes
0 0
1 1
2 1
3 2
4 2
5 2
dtype: int8
"""
return self._data._with_new_scol(self._data.spark.column).rename()
def add_categories(self, new_categories: pd.Index, inplace: bool = False) -> "ps.Series":
raise NotImplementedError()
def as_ordered(self, inplace: bool = False) -> "ps.Series":
raise NotImplementedError()
def as_unordered(self, inplace: bool = False) -> "ps.Series":
raise NotImplementedError()
def remove_categories(self, removals: pd.Index, inplace: bool = False) -> "ps.Series":
raise NotImplementedError()
def remove_unused_categories(self) -> "ps.Series":
raise NotImplementedError()
def rename_categories(self, new_categories: pd.Index, inplace: bool = False) -> "ps.Series":
raise NotImplementedError()
def reorder_categories(
self, new_categories: pd.Index, ordered: bool = None, inplace: bool = False
) -> "ps.Series":
raise NotImplementedError()
def set_categories(
self,
new_categories: pd.Index,
ordered: bool = None,
rename: bool = False,
inplace: bool = False,
) -> "ps.Series":
raise NotImplementedError()
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.categorical
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.categorical.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.categorical tests")
.getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.categorical,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
adammenges/statsmodels | statsmodels/sandbox/nonparametric/tests/ex_gam_am_new.py | 34 | 2606 | # -*- coding: utf-8 -*-
"""Example for gam.AdditiveModel and PolynomialSmoother
This example was written as a test case.
The data generating process is chosen so the parameters are well identified
and estimated.
Created on Fri Nov 04 13:45:43 2011
Author: Josef Perktold
"""
from __future__ import print_function
from statsmodels.compat.python import lrange, zip
import time
import numpy as np
#import matplotlib.pyplot as plt
from numpy.testing import assert_almost_equal
from scipy import stats
from statsmodels.sandbox.gam import AdditiveModel
from statsmodels.sandbox.gam import Model as GAM #?
from statsmodels.genmod import families
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.regression.linear_model import OLS, WLS
np.random.seed(8765993)
#seed is chosen for nice result, not randomly
#other seeds are pretty off in the prediction
#DGP: simple polynomial
order = 3
sigma_noise = 0.5
nobs = 1000 #1000 #with 1000, OLS and Additivemodel aggree in params at 2 decimals
lb, ub = -3.5, 4#2.5
x1 = np.linspace(lb, ub, nobs)
x2 = np.sin(2*x1)
x = np.column_stack((x1/x1.max()*2, x2))
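# polynomial design matrix: powers 0..order of each of the two regressors, flattened to (nobs, 2*(order+1))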
exog = (x[:,:,None]**np.arange(order+1)[None, None, :]).reshape(nobs, -1)
idx = lrange((order+1)*2)
del idx[order+1]
exog_reduced = exog[:,idx] #remove duplicate constant
y_true = exog.sum(1) / 2.
z = y_true #alias check
d = x
y = y_true + sigma_noise * np.random.randn(nobs)
example = 1
if example == 1:
m = AdditiveModel(d)
m.fit(y)
y_pred = m.results.predict(d)
for ss in m.smoothers:
print(ss.params)
res_ols = OLS(y, exog_reduced).fit()
print(res_ols.params)
#assert_almost_equal(y_pred, res_ols.fittedvalues, 3)
if example > 0:
import matplotlib.pyplot as plt
plt.figure()
plt.plot(exog)
y_pred = m.results.mu# + m.results.alpha #m.results.predict(d)
plt.figure()
plt.subplot(2,2,1)
plt.plot(y, '.', alpha=0.25)
plt.plot(y_true, 'k-', label='true')
    plt.plot(res_ols.fittedvalues, 'g-', label='OLS', lw=2, alpha=.7)
plt.plot(y_pred, 'r-', label='AM')
plt.legend(loc='upper left')
plt.title('gam.AdditiveModel')
counter = 2
for ii, xx in zip(['z', 'x1', 'x2'], [z, x[:,0], x[:,1]]):
sortidx = np.argsort(xx)
#plt.figure()
plt.subplot(2, 2, counter)
plt.plot(xx[sortidx], y[sortidx], '.', alpha=0.25)
plt.plot(xx[sortidx], y_true[sortidx], 'k.', label='true', lw=2)
plt.plot(xx[sortidx], y_pred[sortidx], 'r.', label='AM')
plt.legend(loc='upper left')
plt.title('gam.AdditiveModel ' + ii)
counter += 1
plt.show() | bsd-3-clause |
arthurmensch/modl | modl/input_data/fmri/base.py | 1 | 5369 | import copy
import warnings
from nilearn._utils.class_inspect import get_params
from nilearn.input_data import MultiNiftiMasker
from nilearn.input_data.masker_validation import check_embedded_nifti_masker
from sklearn.base import BaseEstimator
from sklearn.externals.joblib import Memory
from nilearn._utils.compat import _basestring
import numpy as np
class BaseNilearnEstimator(BaseEstimator):
def __init__(self,
mask=None, smoothing_fwhm=None,
standardize=True, detrend=True,
low_pass=None, high_pass=None, t_r=None,
target_affine=None, target_shape=None,
mask_strategy='epi', mask_args=None,
memory=Memory(cachedir=None),
memory_level=2,
n_jobs=1, verbose=0, ):
self.mask = mask
self.smoothing_fwhm = smoothing_fwhm
self.standardize = standardize
self.detrend = detrend
self.low_pass = low_pass
self.high_pass = high_pass
self.t_r = t_r
self.target_affine = target_affine
self.target_shape = target_shape
self.mask_strategy = mask_strategy
self.mask_args = mask_args
self.memory = memory
self.memory_level = memory_level
self.n_jobs = n_jobs
self.verbose = verbose
def fit(self, imgs=None, y=None, confounds=None):
if isinstance(imgs, _basestring) or not hasattr(imgs, '__iter__'):
# these classes are meant for list of 4D images
# (multi-subject), we want it to work also on a single
# subject, so we hack it.
imgs = [imgs, ]
if len(imgs) == 0:
# Common error that arises from a null glob. Capture
# it early and raise a helpful message
raise ValueError('Need one or more Niimg-like objects as input, '
'an empty list was given.')
self.masker_ = check_embedded_nifti_masker(self)
# Avoid warning with imgs != None
# if masker_ has been provided a mask_img
if self.masker_.mask_img is None:
self.masker_.fit(imgs)
else:
self.masker_.fit()
self.mask_img_ = self.masker_.mask_img_
return self
def safe_to_filename(img, filename):
img = copy.deepcopy(img)
img.to_filename(filename)
def check_embedded_nifti_masker(estimator):
"""Base function for using a masker within a BaseEstimator class
This creates a masker from instance parameters :
- If instance contains a mask image in mask parameter,
we use this image as new masker mask_img, forwarding instance parameters to
new masker : smoothing_fwhm, standardize, detrend, low_pass= high_pass,
t_r, target_affine, target_shape, mask_strategy, mask_args,
- If instance contains a masker in mask parameter, we use a copy of
this masker, overriding all instance masker related parameters.
In all case, we forward system parameters of instance to new masker :
memory, memory_level, verbose, n_jobs
Parameters
----------
    estimator: object, instance of BaseEstimator
        The object that gives us the values of the parameters
Returns
-------
masker: MultiNiftiMasker or NiftiMasker
New masker
"""
estimator_params = get_params(MultiNiftiMasker, estimator)
mask = getattr(estimator, 'mask', None)
if mask is not None and hasattr(mask, 'mask_img'):
# Creating (Multi)NiftiMasker from provided masker
masker_class = mask.__class__
masker_params = get_params(MultiNiftiMasker, mask)
new_masker_params = masker_params
else:
# Creating (Multi)NiftiMasker
# with parameters extracted from estimator
masker_class = MultiNiftiMasker
new_masker_params = estimator_params
new_masker_params['mask_img'] = mask
# Forwarding technical params
new_masker_params['n_jobs'] = estimator.n_jobs
new_masker_params['memory'] = estimator.memory
new_masker_params['memory_level'] = max(0, estimator.memory_level - 1)
new_masker_params['verbose'] = estimator.verbose
    # Raise a warning if the masker overrides estimator parameters
conflict_string = ""
for param_key in sorted(estimator_params):
if np.any(new_masker_params[param_key] != estimator_params[param_key]):
conflict_string += ("Parameter {0} :\n"
" Masker parameter {1}"
" - overriding estimator parameter {2}\n"
).format(param_key,
new_masker_params[param_key],
estimator_params[param_key])
if conflict_string != "":
warn_str = ("Overriding provided-default estimator parameters with"
" provided masker parameters :\n"
"{0:s}").format(conflict_string)
warnings.warn(warn_str)
masker = masker_class(**new_masker_params)
# Forwarding potential attribute of provided masker
if hasattr(mask, 'mask_img_'):
# Allow free fit of returned mask
masker.mask_img = mask.mask_img_
return masker
| bsd-2-clause |
reuk/wayverb | demo/evaluation/receivers/cardioid.py | 2 | 1909 | #!/usr/local/bin/python
import numpy as np
import matplotlib
render = True
if render:
matplotlib.use('pgf')
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator # added
import matplotlib.mlab as mlab
from string import split
import scipy.signal as signal
import pysndfile
import math
import os
import re
import json
def main():
fig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True, sharey=True)
cmap = plt.get_cmap('viridis')
def plt_file(ax, file_name, name):
sndfile = pysndfile.PySndfile(file_name, 'r')
if sndfile.channels() != 1:
raise RuntimeError('please only load mono files')
Fs = sndfile.samplerate()
signal = sndfile.read_frames()
time = np.arange(len(signal)) / float(Fs)
ax.plot(time, signal)
ax.text(0.001, 0.75, name)
ax1.set_xlabel('time / s')
ax1.set_xlim([0, 0.05])
fig.subplots_adjust(hspace=0)
plt.setp([a.get_xticklabels() for a in fig.axes[:-1]], visible=False)
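    # expected arrival times for path lengths of 3 m, 5 m and 11 m at c = 340 m/s
    # (presumably the direct sound and the first reflections), drawn as dotted lines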
times = [
3.0 / 340.0,
5.0 / 340.0,
11.0 / 340.0]
for ax in fig.axes:
ax.set_ylabel('amplitude')
for t in times:
ax.axvline(t, linestyle='dotted', color='red')
plt_file(ax0, 'away.wav', 'away')
plt_file(ax1, 'toward.wav', 'toward')
ax1.yaxis.set_major_locator(MaxNLocator(prune='upper')) # added
    plt.suptitle('Early Response for Cardioid Receivers Pointing Toward and Away from Source')
#plt.tight_layout()
#plt.subplots_adjust(top=0.9)
plt.show()
if render:
plt.savefig('cardioid.svg', bbox_inches='tight', dpi=96, format='svg')
if __name__ == '__main__':
pgf_with_rc_fonts = {
'font.family': 'serif',
'font.serif': [],
'font.sans-serif': ['Helvetica Neue'],
'legend.fontsize': 12,
}
matplotlib.rcParams.update(pgf_with_rc_fonts)
main()
| gpl-2.0 |
DistributedML/TorML | eurosys-eval/results_tor_no_tor/makeplot.py | 1 | 1404 | import matplotlib.pyplot as plt
import numpy as np
import pdb
if __name__ == "__main__":
fig, ax = plt.subplots(figsize=(10,5))
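    # report the median and standard deviation of the number of recorded loss values across the five
    # runs for each client count, then plot one representative loss trace (run 2) per client count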
for clients in (10, 50, 100, 200):
median_data = np.zeros(5)
for k in (1, 2, 3, 4, 5):
data = np.loadtxt("loss_" + str(clients) + "_" + str(k) + ".csv", delimiter=',')
median_data[k-1] = data.shape[0]
print str(clients) + " median is " + str(np.median(median_data))
print str(clients) + " stddev is " + str(np.std(median_data))
data1 = np.loadtxt("loss_10_2.csv", delimiter=',')
data2 = np.loadtxt("loss_50_2.csv", delimiter=',')
data3 = np.loadtxt("loss_100_2.csv", delimiter=',')
data4 = np.loadtxt("loss_200_2.csv", delimiter=',')
plt.plot(data1, color="black", label="10 clients", lw=5)
plt.plot(data2, color="red", label="50 clients", lw=5)
plt.plot(data3, color="orange", label="100 clients", lw=5)
plt.plot(data4, color="green", label="200 clients", lw=5)
plt.legend(loc='best', ncol=1, fontsize=18)
plt.xlabel("Time (s)", fontsize=22)
plt.ylabel("Training Error", fontsize=22)
axes = plt.gca()
axes.set_ylim([0, 0.5])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.setp(ax.get_xticklabels(), fontsize=18)
plt.setp(ax.get_yticklabels(), fontsize=18)
plt.tight_layout()
plt.show() | mit |
MorganR/gaussian-processes | utils/plots.py | 1 | 1550 | import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from data.image import get_xyz_space
def plot_image(image):
plt.imshow(image)
plt.colorbar()
plt.show()
def plot_image_3d(image):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
xyz = get_xyz_space(image)
ax.plot_trisurf(xyz[0], xyz[1], xyz[2])
plt.show()
def plot_image_and_model(image, model):
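    # show the original image and, below it, the GP posterior mean predicted on a grid four times
    # denser than the image in each direction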
fig = plt.figure()
ax = fig.add_subplot(211)
ax_im = ax.imshow(image)
fig.colorbar(ax_im)
height, width = image.shape
xx = np.linspace(0, width-1, width*4)
yy = np.linspace(0, height-1, height*4)
xy = np.meshgrid(xx, yy)
xx = xy[0].flatten()
yy = xy[1].flatten()
xy = np.stack((xx, yy), axis=-1)
mean, var = model.predict_y(xy)
mean = np.reshape(mean, (height*4, width*4))
ax = fig.add_subplot(212)
ax_im = ax.imshow(mean)
fig.colorbar(ax_im)
plt.show()
def plot_image_and_model_3d(image, model):
fig = plt.figure()
ax = fig.add_subplot(211, projection='3d')
xyz = get_xyz_space(image)
ax.plot_trisurf(xyz[0], xyz[1], xyz[2])
height, width = image.shape
xx = np.linspace(0, width-1, width*4)
yy = np.linspace(0, height-1, height*4)
xy = np.meshgrid(xx, yy)
xx = xy[0].flatten()
yy = xy[1].flatten()
xy = np.stack((xx, yy), axis=-1)
mean, var = model.predict_y(xy)
ax = fig.add_subplot(212, projection='3d')
ax.plot_trisurf(xx, yy, mean.flatten(), color='g')
plt.show()
| mit |
Unidata/MetPy | v0.11/_downloads/fa4ffdc62b92ef4744fde2f3640e4f6a/Point_Interpolation.py | 4 | 5077 | # Copyright (c) 2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Point Interpolation
===================
Compares different point interpolation approaches.
"""
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from matplotlib.colors import BoundaryNorm
import matplotlib.pyplot as plt
import numpy as np
from metpy.cbook import get_test_data
from metpy.interpolate import (interpolate_to_grid, remove_nan_observations,
remove_repeat_coordinates)
from metpy.plots import add_metpy_logo
###########################################
def basic_map(proj):
"""Make our basic default map for plotting"""
fig = plt.figure(figsize=(15, 10))
add_metpy_logo(fig, 0, 80, size='large')
view = fig.add_axes([0, 0, 1, 1], projection=proj)
view.set_extent([-120, -70, 20, 50])
view.add_feature(cfeature.STATES.with_scale('50m'))
view.add_feature(cfeature.OCEAN)
view.add_feature(cfeature.COASTLINE)
view.add_feature(cfeature.BORDERS, linestyle=':')
return fig, view
def station_test_data(variable_names, proj_from=None, proj_to=None):
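    # load the bundled station_data.txt test file and, if both projections are given, transform the
    # station longitude/latitude into the target projection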
with get_test_data('station_data.txt') as f:
all_data = np.loadtxt(f, skiprows=1, delimiter=',',
usecols=(1, 2, 3, 4, 5, 6, 7, 17, 18, 19),
dtype=np.dtype([('stid', '3S'), ('lat', 'f'), ('lon', 'f'),
('slp', 'f'), ('air_temperature', 'f'),
('cloud_fraction', 'f'), ('dewpoint', 'f'),
('weather', '16S'),
('wind_dir', 'f'), ('wind_speed', 'f')]))
all_stids = [s.decode('ascii') for s in all_data['stid']]
data = np.concatenate([all_data[all_stids.index(site)].reshape(1, ) for site in all_stids])
value = data[variable_names]
lon = data['lon']
lat = data['lat']
if proj_from is not None and proj_to is not None:
try:
proj_points = proj_to.transform_points(proj_from, lon, lat)
return proj_points[:, 0], proj_points[:, 1], value
except Exception as e:
print(e)
return None
return lon, lat, value
from_proj = ccrs.Geodetic()
to_proj = ccrs.AlbersEqualArea(central_longitude=-97.0000, central_latitude=38.0000)
levels = list(range(-20, 20, 1))
cmap = plt.get_cmap('magma')
norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)
x, y, temp = station_test_data('air_temperature', from_proj, to_proj)
x, y, temp = remove_nan_observations(x, y, temp)
x, y, temp = remove_repeat_coordinates(x, y, temp)
###########################################
# Scipy.interpolate linear
# ------------------------
gx, gy, img = interpolate_to_grid(x, y, temp, interp_type='linear', hres=75000)
img = np.ma.masked_where(np.isnan(img), img)
fig, view = basic_map(to_proj)
mmb = view.pcolormesh(gx, gy, img, cmap=cmap, norm=norm)
fig.colorbar(mmb, shrink=.4, pad=0, boundaries=levels)
###########################################
# Natural neighbor interpolation (MetPy implementation)
# -----------------------------------------------------
# `Reference <https://github.com/Unidata/MetPy/files/138653/cwp-657.pdf>`_
gx, gy, img = interpolate_to_grid(x, y, temp, interp_type='natural_neighbor', hres=75000)
img = np.ma.masked_where(np.isnan(img), img)
fig, view = basic_map(to_proj)
mmb = view.pcolormesh(gx, gy, img, cmap=cmap, norm=norm)
fig.colorbar(mmb, shrink=.4, pad=0, boundaries=levels)
###########################################
# Cressman interpolation
# ----------------------
# search_radius = 100 km
#
# grid resolution = 25 km
#
# min_neighbors = 1
gx, gy, img = interpolate_to_grid(x, y, temp, interp_type='cressman', minimum_neighbors=1,
hres=75000, search_radius=100000)
img = np.ma.masked_where(np.isnan(img), img)
fig, view = basic_map(to_proj)
mmb = view.pcolormesh(gx, gy, img, cmap=cmap, norm=norm)
fig.colorbar(mmb, shrink=.4, pad=0, boundaries=levels)
###########################################
# Barnes Interpolation
# --------------------
# search_radius = 100km
#
# min_neighbors = 3
gx, gy, img1 = interpolate_to_grid(x, y, temp, interp_type='barnes', hres=75000,
search_radius=100000)
img1 = np.ma.masked_where(np.isnan(img1), img1)
fig, view = basic_map(to_proj)
mmb = view.pcolormesh(gx, gy, img1, cmap=cmap, norm=norm)
fig.colorbar(mmb, shrink=.4, pad=0, boundaries=levels)
###########################################
# Radial basis function interpolation
# ------------------------------------
# linear
gx, gy, img = interpolate_to_grid(x, y, temp, interp_type='rbf', hres=75000, rbf_func='linear',
rbf_smooth=0)
img = np.ma.masked_where(np.isnan(img), img)
fig, view = basic_map(to_proj)
mmb = view.pcolormesh(gx, gy, img, cmap=cmap, norm=norm)
fig.colorbar(mmb, shrink=.4, pad=0, boundaries=levels)
plt.show()
| bsd-3-clause |
joelmpiper/bill_taxonomy | src/analyze/model.py | 1 | 5622 | import psycopg2
import pandas as pd
from sklearn.cross_validation import train_test_split
from sklearn import metrics
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import CountVectorizer
import bills.wrangle.create_corpus
from bills.ingest.setup_database import Subject_Score
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
from sklearn.linear_model import LogisticRegression
import pickle
import yaml
class Bill_Info(object):
""" class to store bill information """
def __init__(self):
with open("../configs.yml", 'r') as ymlfile:
self.cfg = yaml.load(ymlfile)
def get_bill_info(dbname, username, subject):
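    # label every US bill 1/0 by whether it carries the given subject tag, then split text and labels
    # into train and test sets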
con = psycopg2.connect(database=dbname, user=username)
# query:
sql_query = """
SELECT bill_num, bill_name, bill_text FROM us_bills
"""
us_bills = pd.read_sql_query(sql_query, con)
sql_query = """
SELECT bill_num, subject FROM bill_subject
WHERE subject='{0}'
"""
subject_terms = pd.read_sql_query(sql_query.format(subject), con)
subject_col_name = subject.lower().replace(' ', '_')
us_bills[subject_col_name] = 0
us_bills.ix[us_bills['bill_num'].isin(subject_terms['bill_num']),
subject_col_name] = 1
# X = us_bills[['bill_name', 'bill_text']]
X = us_bills['bill_text']
y = us_bills[subject_col_name]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
return X_train, X_test, y_train, y_test
def get_us_data(dbname, username, subject, model, model_type='naive_bayes'):
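    # vectorize the bill text, fit the requested classifier (multinomial naive Bayes or L1 logistic
    # regression), pickle the fitted objects and print test accuracy and confusion matrix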
X_train, X_test, y_train, y_test = get_bill_info(dbname, username,
subject)
X_train_dtm = model.fit_transform(X_train)
X_test_dtm = model.transform(X_test)
# use Naive Bayes to predict the star rating
mod = MultinomialNB()
if(model_type == 'naive_bayes'):
mod = MultinomialNB()
elif(model_type == 'logistic'):
mod = LogisticRegression(C=1e9, penalty='l1')
mod.fit(X_train_dtm, y_train)
y_pred_class = mod.predict(X_test_dtm)
y_pred_prob = mod.predict_proba(X_test_dtm)[:, 1]
combo_results = [mod, model, y_pred_class, y_pred_prob,
X_train_dtm, X_test_dtm, y_train,
y_test]
pickle.dump(combo_results,
open(('/Users/Joel/Desktop/Insight/data/model.p'), 'wb'))
# return (metrics.accuracy_score(y_test, y_pred_class),
# metrics.confusion_matrix(y_test, y_pred_class),
# len(y_pred_prob))
print(metrics.accuracy_score(y_test, y_pred_class))
print(metrics.confusion_matrix(y_test, y_pred_class))
print(max(y_pred_prob))
return model, mod
# create a score for all of the subjects
def score_all_subjects():
with open("configs.yml", 'r') as ymlfile:
cfg = yaml.load(ymlfile)
dbname = cfg['dbname']
username = cfg['username']
model_type = cfg['model_type']
subset_size = cfg['subset_size']
score_pickle_dest = cfg['output_dir']
subjects = ['Competition and antitrust', 'Bank accounts, deposits, capital',
'Bankruptcy', 'Employee benefits and pensions',
'Intellectual property', 'Labor and employment', 'Securities',
'Taxation']
pairs = []
for sub in subjects:
pairs.append(score_ny_bills(dbname, username, sub, model_type,
subset_size))
pickle.dump(pairs,
open((score_pickle_dest), 'wb'))
def score_ny_bills(dbname='bills_db', username='Joel', subject="Health",
model_type="naive_bayes", subset_size=None):
con = psycopg2.connect(database=dbname, user=username)
# query:
sql_query = """
SELECT bill_num, bill_name, bill_text FROM ny_bills
"""
ny_bills = pd.read_sql_query(sql_query, con)
# include 1-grams and 2-grams, we end up with many features
vect = CountVectorizer(stop_words='english')
transformer, trained_model = get_us_data(dbname, username, subject, vect,
model_type)
X_ny = transformer.transform(ny_bills['bill_text'])
probs = trained_model.predict_proba(X_ny)[:, 1]
pairs = [(ny_bills.ix[i, 'bill_name'],
probs[i]) for i in range(0, len(ny_bills))]
# store_results(dbname, username, subject, ny_bills['bill_num'], probs)
return pairs
def store_results(dbname, username, subject, bill_nums, probs):
engine = create_engine('postgres://%s@localhost/%s' % (username, dbname))
# Open a session and connect to the database engine
Session = sessionmaker(bind=engine)
session = Session()
for i, bill in enumerate(bill_nums):
one_bill = Subject_Score(subject=subject, bill_num=bill,
score=probs[i])
session.add(one_bill)
session.commit()
session.close()
def create_lda_tfidf(dbname='bills_db', username='Joel'):
X_train, X_test, y_train, y_test = get_bill_info(dbname, username)
# create corpus, lda, tf/idf model using the training data
title_corpus, text_corpus = bills.wrangle.create_corpus.real_corpus(
X_train['bill_name'], X_train['bill_text'])
# apply the lda model on the X_train
# apply the tf/idf model on the X_train
return bills.wrangle.create_corpus.create_corpora(X_train['bill_name'],
X_train['bill_text'])
# combine these two as input vectors
# run the logistic regression on those sets of features
| mit |
ClimbsRocks/scikit-learn | sklearn/metrics/setup.py | 24 | 1059 | import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("metrics", parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension("pairwise_fast",
sources=["pairwise_fast.c"],
include_dirs=[os.path.join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
config.add_subpackage('tests')
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
ilyes14/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| bsd-3-clause |
AlexanderFabisch/scikit-learn | sklearn/preprocessing/label.py | 16 | 26702 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Joel Nothman <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import column_or_1d
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
if diff:
raise ValueError("y contains new labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
utils.multiclass.type_of_target. Possible type are 'continuous',
'continuous-multioutput', 'binary', 'multiclass',
'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
which represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to as the
1-of-K coding scheme.
Parameters
----------
y : numpy array or sparse matrix of shape (n_samples,) or
(n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1, which
represents multilabel classification. Sparse
matrix can be CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
check_is_fitted(self, 'classes_')
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
greatest value. Typically, this allows one to use the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
check_is_fitted(self, 'classes_')
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
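# Illustrative sketch (added; not part of scikit-learn): how the `threshold`
# convention documented in inverse_transform above behaves for probabilistic
# multilabel scores. With threshold=None the cut falls half way between
# neg_label=0 and pos_label=1, i.e. 0.5, matching predict_proba output.
def _demo_inverse_transform_threshold():
    lb = LabelBinarizer()
    lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))   # multilabel indicator target
    proba = np.array([[0.1, 0.8, 0.6],
                      [0.9, 0.2, 0.3]])
    # thresholding at 0.5 recovers the indicator matrix [[0 1 1], [1 0 0]]
    return lb.inverse_transform(proba)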
def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
if y_type == 'unknown':
raise ValueError("The type of target data is not known")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if len(classes) == 1:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
else:
raise ValueError("%s target data is not supported with label "
"binarization" % y_type)
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
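# Illustrative sketch (added; not part of scikit-learn): the CSR argmax logic
# above can be cross-checked against the dense argmax branch on random data.
# The helper name below is hypothetical and exists only for this example.
def _demo_sparse_argmax_equivalence():
    rng = np.random.RandomState(0)
    Y = rng.randn(5, 3)
    classes = np.array([10, 20, 30])
    dense = _inverse_binarize_multiclass(Y, classes)
    sparse = _inverse_binarize_multiclass(sp.csr_matrix(Y), classes)
    assert np.array_equal(dense, sparse)
    return dense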
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
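# Illustrative sketch (added; not part of scikit-learn): the indices/indptr
# layout built by MultiLabelBinarizer._transform for a tiny input, viewed by
# converting the dense output back to CSR.
def _demo_multilabel_csr_layout():
    mlb = MultiLabelBinarizer()
    yt = mlb.fit_transform([(1, 2), (3,), (1, 3)])   # classes_ becomes [1, 2, 3]
    Y = sp.csr_matrix(yt)
    # indices holds the column of each present label, row by row:
    # [0 1 2 0 2]; indptr [0 2 3 5] says the rows hold 2, 1 and 2 labels.
    return Y.indices, Y.indptr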
| bsd-3-clause |
ABoothInTheWild/baseball-research | Playoff Odds/mlbPlayoffOdds2018/RunsScoredAllowedSimulator.py | 1 | 4718 | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 3 22:46:57 2018
@author: ABooth
"""
import numpy as np
import pandas as pd
def ImportRunsScoredAllowedDF(fileLoc):
#define data
df = pd.read_csv(fileLoc)
df["Home/Away"] = pd.get_dummies(df['Unnamed: 4'])
df["Won"] = df["R"] > df["RA"]
dfSub = df[["R", "RA", "Home/Away", "Won"]].astype("float")
return dfSub
#define simulator
def RunsSimulator(team1_df, team2_df, useHomeFieldAdv, niterations, negative_binom_size, length_of_series = 1):
#set seed for reproducibility
np.random.seed(1234)
#set game arrays
rtnScores = np.empty((niterations, length_of_series * 2))
#Set home field games. Take the game number, subtract 1 and double it
#for baseball, that's Games 1,2,6,7
#make sure team with home field advantage is team1
#team2 home advantage is Games 3,4,5
team1_HOME = [0,2,10,12]
team2_HOME = [4,6,8]
#Get Means
sampleMeans = getOffensiveAndDefensiveMeans(team1_df, team2_df, useHomeFieldAdv)
for i in range(niterations):
for j in range(0, 1 * 2, 2):
if not useHomeFieldAdv:
#Get game score
sample_game_score = getSampleScores(sampleMeans[0], sampleMeans[1], sampleMeans[2], sampleMeans[3], 1, negative_binom_size)
else:
if j in team1_HOME:
sample_game_score = getSampleScores(sampleMeans[0], sampleMeans[2], sampleMeans[5], sampleMeans[7], 1, negative_binom_size)
elif j in team2_HOME:
sample_game_score = getSampleScores(sampleMeans[1], sampleMeans[3], sampleMeans[4], sampleMeans[6], 1, negative_binom_size)
else:
print("Something has gone terrible wrong")
break
#record score
rtnScores[i,j] = sample_game_score[0]
rtnScores[i,j+1] = sample_game_score[1]
results = pd.DataFrame(rtnScores, columns = ["Team1Score", "Team2Score"])
results["WinningTeam"] = np.where(results['Team1Score'] > results["Team2Score"], 'Team1', 'Team2')
return len(results[results.WinningTeam == "Team1"])/float(niterations)
#return results
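# Hedged usage sketch (added for illustration): the CSV paths are hypothetical
# game-log exports with the R, RA and home/away columns expected by
# ImportRunsScoredAllowedDF; the dispersion value of 4 is an assumption.
def _example_series_simulation():
    team1 = ImportRunsScoredAllowedDF("team1_gamelog.csv")
    team2 = ImportRunsScoredAllowedDF("team2_gamelog.csv")
    p_team1 = RunsSimulator(team1, team2, useHomeFieldAdv=True,
                            niterations=10000, negative_binom_size=4)
    print("Team 1 win probability: %.3f" % p_team1)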
def getOffensiveAndDefensiveMeans(team1_df, team2_df, useHomeFieldAdv):
rtnMeans = []
if not useHomeFieldAdv:
#Get Means
team1_off_mean = np.mean(team1_df["R"])
team1_def_mean = np.mean(team1_df["RA"])
team2_off_mean = np.mean(team2_df["R"])
team2_def_mean = np.mean(team2_df["RA"])
rtnMeans = np.array([team1_off_mean, team1_def_mean, team2_off_mean, team2_def_mean])
else:
#Get Means
team1_off_mean_HOME = np.mean(team1_df[team1_df["Home/Away"] == 0]["R"])
team1_off_mean_AWAY = np.mean(team1_df[team1_df["Home/Away"] == 1]["R"])
team1_def_mean_HOME = np.mean(team1_df[team1_df["Home/Away"] == 0]["RA"])
team1_def_mean_AWAY = np.mean(team1_df[team1_df["Home/Away"] == 1]["RA"])
team2_off_mean_HOME = np.mean(team2_df[team2_df["Home/Away"] == 0]["R"])
team2_off_mean_AWAY = np.mean(team2_df[team2_df["Home/Away"] == 1]["R"])
team2_def_mean_HOME = np.mean(team2_df[team2_df["Home/Away"] == 0]["RA"])
team2_def_mean_AWAY = np.mean(team2_df[team2_df["Home/Away"] == 1]["RA"])
rtnMeans = np.array([team1_off_mean_HOME, team1_off_mean_AWAY, team1_def_mean_HOME, team1_def_mean_AWAY,
team2_off_mean_HOME, team2_off_mean_AWAY, team2_def_mean_HOME, team2_def_mean_AWAY])
return rtnMeans
def getSampleScores(team1_off_mean, team1_def_mean, team2_off_mean, team2_def_mean, nScores, size):
#sample from negative binomial for each statistic
team1_off = np.random.negative_binomial(size, size/(size+team1_off_mean), nScores)
team1_def = np.random.negative_binomial(size, size/(size+team1_def_mean), nScores)
team2_off = np.random.negative_binomial(size, size/(size+team2_off_mean), nScores)
team2_def = np.random.negative_binomial(size, size/(size+team2_def_mean), nScores)
#determine final game score
team1_score = round(np.mean(np.array([team1_off, team2_def])))
team2_score = round(np.mean(np.array([team2_off, team1_def])))
rtnScoreArray = np.array([team1_score, team2_score])
#Check for ties
if(team1_score == team2_score):
rtnScoreArray = getSampleScores(team1_off_mean, team1_def_mean, team2_off_mean, team2_def_mean, nScores, size)
return rtnScoreArray
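# Illustrative sketch (added): the parameterisation used above,
# p = size / (size + mean), yields a negative binomial whose expected value is
# the target mean, with variance mean + mean**2 / size.
def _demo_negative_binomial_mean(mean=4.5, size=4, n=200000):
    np.random.seed(0)
    draws = np.random.negative_binomial(size, size / (size + mean), n)
    return draws.mean()   # close to 4.5 for the defaults above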
| gpl-3.0 |
ryanpdwyer/hdf5plotter | setup.py | 1 | 1784 | #!/usr/bin/env python
import os
import sys
from setuptools import setup
# See https://github.com/warner/python-versioneer
import versioneer
versioneer.VCS = 'git'
versioneer.versionfile_source = 'hdf5plotter/_version.py'
versioneer.versionfile_build = 'hdf5plotter/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'hdf5plotter-' # dirname like 'myproject-1.2.0'
readme = open('README.rst').read()
doclink = """
Documentation
-------------
The full documentation is at http://hdf5plotter.rtfd.org."""
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='hdf5plotter',
version=versioneer.get_version(),
description='Plot data contained in HDF5 files.',
long_description=readme + '\n\n' + doclink + '\n\n' + history,
author='Ryan Dwyer',
author_email='[email protected]',
url='https://github.com/ryanpdwyer/hdf5plotter',
packages=[
'hdf5plotter'
],
include_package_data=True,
install_requires=['numpy', 'scipy', 'matplotlib', 'h5py', 'pint', 'bunch',
'pandas', 'click', 'seaborn', 'pathlib', 'six'],
tests_require=['nose>=1.0'],
test_suite='nose.collector',
license='MIT',
zip_safe=False,
cmdclass=versioneer.get_cmdclass(),
entry_points="""
[console_scripts]
h5plot=hdf5plotter._cli:h5plot
csvplot=hdf5plotter._cli:csvplot
csvscale=hdf5plotter._cli:csvscale
""",
keywords='hdf5plotter',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7'
]
)
| mit |
crslab/Inverse-Reinforcement-Learning | examples/experiments.py | 1 | 19494 | """
Perform the experiments from the report.
Matthew Alger, 2015
[email protected]
"""
from time import time
from sys import stdout
import numpy as np
import matplotlib.pyplot as plt
from irl import maxent
from irl import deep_maxent
from irl import value_iteration
from irl.mdp.gridworld import Gridworld
from irl.mdp.objectworld import Objectworld
def test_gw_once(grid_size, feature_map, n_samples, epochs, structure):
"""
Test MaxEnt and DeepMaxEnt on a gw of size grid_size with the feature
map feature_map with n_samples paths.
grid_size: Grid size. int.
feature_map: Which feature map to use. String in {ident, coord, proxi}.
n_samples: Number of paths to sample.
epochs: Number of epochs to run MaxEnt with.
structure: Neural network structure tuple, e.g. (3, 3) would be a
3-layer neural network with assumed inputs.
-> Expected value difference for MaxEnt, DeepMaxEnt
"""
# Basic gist of what we're doing here: Get the reward function using our
# different IRL methods, use those to get a policy, evaluate that policy
# using the true reward, and then return the difference in expected values.
# Setup parameters.
wind = 0.3
discount = 0.9
learning_rate = 0.01
trajectory_length = 3*grid_size
# Make the gridworld and associated data.
gw = Gridworld(grid_size, wind, discount)
feature_matrix = gw.feature_matrix(feature_map)
ground_reward = np.array([gw.reward(i) for i in range(gw.n_states)])
optimal_policy = value_iteration.find_policy(gw.n_states,
gw.n_actions,
gw.transition_probability,
ground_reward,
discount).argmax(axis=1)
trajectories = gw.generate_trajectories(n_samples,
trajectory_length,
optimal_policy.take)
p_start_state = (np.bincount(trajectories[:, 0, 0], minlength=gw.n_states) /
trajectories.shape[0])
# True value.
optimal_V = value_iteration.optimal_value(gw.n_states,
gw.n_actions,
gw.transition_probability,
ground_reward, gw.discount)
# MaxEnt reward; policy; value.
maxent_reward = deep_maxent.irl((feature_matrix.shape[1],),
feature_matrix,
gw.n_actions,
gw.discount,
gw.transition_probability,
trajectories, epochs, learning_rate)
maxent_policy = value_iteration.find_policy(gw.n_states,
gw.n_actions,
gw.transition_probability,
maxent_reward,
discount).argmax(axis=1)
maxent_V = value_iteration.value(maxent_policy,
gw.n_states,
gw.transition_probability,
ground_reward,
gw.discount)
maxent_EVD = optimal_V.dot(p_start_state) - maxent_V.dot(p_start_state)
# DeepMaxEnt reward; policy; value.
deep_maxent_reward = deep_maxent.irl((feature_matrix.shape[1],)+structure,
feature_matrix,
gw.n_actions,
gw.discount,
gw.transition_probability,
trajectories, epochs, learning_rate)
deep_maxent_policy = value_iteration.find_policy(gw.n_states,
gw.n_actions,
gw.transition_probability,
deep_maxent_reward,
discount).argmax(axis=1)
deep_maxent_V = value_iteration.value(deep_maxent_policy,
gw.n_states,
gw.transition_probability,
ground_reward,
gw.discount)
deep_maxent_EVD = (optimal_V.dot(p_start_state) -
deep_maxent_V.dot(p_start_state))
plt.subplot(3, 3, 1)
plt.pcolor(ground_reward.reshape((grid_size, grid_size)))
plt.title("Groundtruth reward")
plt.tick_params(labeltop=False, labelbottom=False, labelleft=False,
bottom=False, top=False, left=False, right=False,
labelright=False)
plt.subplot(3, 3, 2)
plt.pcolor(maxent_reward.reshape((grid_size, grid_size)))
plt.title("MaxEnt reward")
plt.tick_params(labeltop=False, labelbottom=False, labelleft=False,
bottom=False, top=False, left=False, right=False,
labelright=False)
plt.subplot(3, 3, 3)
plt.pcolor(deep_maxent_reward.reshape((grid_size, grid_size)))
plt.title("DeepMaxEnt reward")
plt.tick_params(labeltop=False, labelbottom=False, labelleft=False,
bottom=False, top=False, left=False, right=False,
labelright=False)
plt.subplot(3, 3, 4)
plt.pcolor(optimal_policy.reshape((grid_size, grid_size)), vmin=0, vmax=3)
plt.title("Optimal policy")
plt.tick_params(labeltop=False, labelbottom=False, labelleft=False,
bottom=False, top=False, left=False, right=False,
labelright=False)
plt.subplot(3, 3, 5)
plt.pcolor(maxent_policy.reshape((grid_size, grid_size)), vmin=0, vmax=3)
plt.title("MaxEnt policy")
plt.tick_params(labeltop=False, labelbottom=False, labelleft=False,
bottom=False, top=False, left=False, right=False,
labelright=False)
plt.subplot(3, 3, 6)
plt.pcolor(deep_maxent_policy.reshape((grid_size, grid_size)),
vmin=0, vmax=3)
plt.title("DeepMaxEnt policy")
plt.tick_params(labeltop=False, labelbottom=False, labelleft=False,
bottom=False, top=False, left=False, right=False,
labelright=False)
plt.subplot(3, 3, 7)
plt.pcolor(optimal_V.reshape((grid_size, grid_size)))
plt.title("Optimal value")
plt.tick_params(labeltop=False, labelbottom=False, labelleft=False,
bottom=False, top=False, left=False, right=False,
labelright=False)
plt.subplot(3, 3, 8)
plt.pcolor(maxent_V.reshape((grid_size, grid_size)))
plt.title("MaxEnt value")
plt.tick_params(labeltop=False, labelbottom=False, labelleft=False,
bottom=False, top=False, left=False, right=False,
labelright=False)
plt.subplot(3, 3, 9)
plt.pcolor(deep_maxent_V.reshape((grid_size, grid_size)))
plt.title("DeepMaxEnt value")
plt.tick_params(labeltop=False, labelbottom=False, labelleft=False,
bottom=False, top=False, left=False, right=False,
labelright=False)
plt.savefig("{}_{}_{}_{}gridworld{}.png".format(grid_size, feature_map,
n_samples, epochs, structure, np.random.randint(10000000)))
return maxent_EVD, deep_maxent_EVD
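# Hedged usage sketch (added for illustration): a single small gridworld run.
# The argument values are assumptions chosen for a quick experiment, not the
# settings used in the report.
def _example_single_gridworld_run():
    maxent_evd, deep_evd = test_gw_once(grid_size=5, feature_map="ident",
                                        n_samples=16, epochs=100,
                                        structure=(3, 3))
    print("MaxEnt EVD: {:.3f}, DeepMaxEnt EVD: {:.3f}".format(maxent_evd,
                                                              deep_evd))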
def test_ow_once(grid_size, n_objects, n_colours, discrete, l1, l2, n_samples,
epochs, structure):
"""
Test MaxEnt and DeepMaxEnt on a ow of size grid_size with the feature
map feature_map with n_samples paths.
grid_size: Grid size. int.
n_objects: Number of objects. int.
n_colours: Number of colours. int.
discrete: Whether the features should be discrete. bool.
l1: L1 regularisation. float.
l2: L2 regularisation. float.
n_samples: Number of paths to sample.
epochs: Number of epochs to run MaxEnt with.
structure: Neural network structure tuple, e.g. (3, 3) would be a
3-layer neural network with assumed inputs.
-> Expected value difference for MaxEnt, DeepMaxEnt
"""
# Basic gist of what we're doing here: Get the reward function using our
# different IRL methods, use those to get a policy, evaluate that policy
# using the true reward, and then return the difference in expected values.
# Setup parameters.
wind = 0.3
discount = 0.9
learning_rate = 0.01
trajectory_length = 3*grid_size
# Make the objectworld and associated data.
ow = Objectworld(grid_size, n_objects, n_colours, wind, discount)
feature_matrix = ow.feature_matrix(discrete)
ground_reward = np.array([ow.reward(i) for i in range(ow.n_states)])
optimal_policy = value_iteration.find_policy(ow.n_states,
ow.n_actions,
ow.transition_probability,
ground_reward,
discount).argmax(axis=1)
trajectories = ow.generate_trajectories(n_samples,
trajectory_length,
optimal_policy.take)
p_start_state = (np.bincount(trajectories[:, 0, 0], minlength=ow.n_states) /
trajectories.shape[0])
# True value.
optimal_V = value_iteration.optimal_value(ow.n_states,
ow.n_actions,
ow.transition_probability,
ground_reward, ow.discount)
# MaxEnt reward; policy; value.
maxent_reward = deep_maxent.irl((feature_matrix.shape[1],),
feature_matrix,
ow.n_actions,
ow.discount,
ow.transition_probability,
trajectories, epochs, learning_rate,
l1=l1, l2=l2)
maxent_policy = value_iteration.find_policy(ow.n_states,
ow.n_actions,
ow.transition_probability,
maxent_reward,
discount).argmax(axis=1)
maxent_V = value_iteration.value(maxent_policy,
ow.n_states,
ow.transition_probability,
ground_reward,
ow.discount)
maxent_EVD = optimal_V.dot(p_start_state) - maxent_V.dot(p_start_state)
# DeepMaxEnt reward; policy; value.
deep_learning_rate = 0.005 # For the 32 x 32 experiments.
deep_maxent_reward = deep_maxent.irl((feature_matrix.shape[1],)+structure,
feature_matrix,
ow.n_actions,
ow.discount,
ow.transition_probability,
trajectories, epochs,
deep_learning_rate,
l1=l1, l2=l2)
deep_maxent_policy = value_iteration.find_policy(ow.n_states,
ow.n_actions,
ow.transition_probability,
deep_maxent_reward,
discount).argmax(axis=1)
deep_maxent_V = value_iteration.value(deep_maxent_policy,
ow.n_states,
ow.transition_probability,
ground_reward,
ow.discount)
deep_maxent_EVD = (optimal_V.dot(p_start_state) -
deep_maxent_V.dot(p_start_state))
plt.subplot(3, 3, 1)
plt.pcolor(ground_reward.reshape((grid_size, grid_size)))
plt.title("Groundtruth reward")
plt.tick_params(labeltop=False, labelbottom=False, labelleft=False,
bottom=False, top=False, left=False, right=False, labelright=False)
plt.subplot(3, 3, 2)
plt.pcolor(maxent_reward.reshape((grid_size, grid_size)))
plt.title("MaxEnt reward")
plt.tick_params(labeltop=False, labelbottom=False, labelleft=False,
bottom=False, top=False, left=False, right=False, labelright=False)
plt.subplot(3, 3, 3)
plt.pcolor(deep_maxent_reward.reshape((grid_size, grid_size)))
plt.title("DeepMaxEnt reward")
plt.tick_params(labeltop=False, labelbottom=False, labelleft=False,
bottom=False, top=False, left=False, right=False, labelright=False)
plt.subplot(3, 3, 4)
plt.pcolor(optimal_policy.reshape((grid_size, grid_size)), vmin=0, vmax=3)
plt.title("Optimal policy")
plt.tick_params(labeltop=False, labelbottom=False, labelleft=False,
bottom=False, top=False, left=False, right=False, labelright=False)
plt.subplot(3, 3, 5)
plt.pcolor(maxent_policy.reshape((grid_size, grid_size)), vmin=0, vmax=3)
plt.title("MaxEnt policy")
plt.tick_params(labeltop=False, labelbottom=False, labelleft=False,
bottom=False, top=False, left=False, right=False, labelright=False)
plt.subplot(3, 3, 6)
plt.pcolor(deep_maxent_policy.reshape((grid_size, grid_size)),
vmin=0, vmax=3)
plt.title("DeepMaxEnt policy")
plt.tick_params(labeltop=False, labelbottom=False, labelleft=False,
bottom=False, top=False, left=False, right=False, labelright=False)
plt.subplot(3, 3, 7)
plt.pcolor(optimal_V.reshape((grid_size, grid_size)))
plt.title("Optimal value")
plt.tick_params(labeltop=False, labelbottom=False, labelleft=False,
bottom=False, top=False, left=False, right=False, labelright=False)
plt.subplot(3, 3, 8)
plt.pcolor(maxent_V.reshape((grid_size, grid_size)))
plt.title("MaxEnt value")
plt.tick_params(labeltop=False, labelbottom=False, labelleft=False,
bottom=False, top=False, left=False, right=False, labelright=False)
plt.subplot(3, 3, 9)
plt.pcolor(deep_maxent_V.reshape((grid_size, grid_size)))
plt.title("DeepMaxEnt value")
plt.tick_params(labeltop=False, labelbottom=False, labelleft=False,
bottom=False, top=False, left=False, right=False, labelright=False)
plt.savefig("{}_{}_{}_{}_{}_{}_{}_{}_{}_objectworld_{}.png".format(
grid_size, n_objects, n_colours, discrete, n_samples, epochs, structure,
l1, l2, np.random.randint(10000000)))
return maxent_EVD, deep_maxent_EVD
def test_gw_over_samples(grid_size, feature_map, epochs, structure, n):
"""
Test MaxEnt and DeepMaxEnt on a gridworld of size grid_size with the feature
map feature_map with different numbers of paths.
grid_size: Grid size. int.
feature_map: Which feature map to use. String in {ident, coord, proxi}.
epochs: MaxEnt iterations. int.
structure: Neural network structure tuple, e.g. (3, 3) would be a
3-layer neural network with assumed inputs.
n: Iterations. int.
-> (MaxEnt [(n_samples, mean expected value difference, stdev)],
DeepMaxEnt [(n_samples, mean expected value difference, stdev)]),
raw data (maxent_data, deep_maxent_data)
"""
maxent_data = []
deep_maxent_data = []
for n_samples in (32,):
t = time()
maxent_EVDs = []
deep_maxent_EVDs = []
for i in range(n):
print("{}: {}/{}".format(n_samples, i+1, n))
maxent_EVD, deep_maxent_EVD = test_gw_once(grid_size, feature_map,
n_samples, epochs,
structure)
maxent_EVDs.append(maxent_EVD)
deep_maxent_EVDs.append(deep_maxent_EVD)
print(maxent_EVD, deep_maxent_EVD)
stdout.flush()
maxent_data.append((n_samples, np.mean(maxent_EVDs),
np.std(maxent_EVDs)))
deep_maxent_data.append((n_samples, np.mean(deep_maxent_EVDs),
np.std(deep_maxent_EVDs)))
print("{} (took {:.02}s)".format(n_samples, time() - t))
print("MaxEnt:", maxent_data)
print("DeepMaxEnt:", deep_maxent_data)
return maxent_data, deep_maxent_data
def test_ow_over_samples(grid_size, n_objects, n_colours, discrete, l1, l2,
epochs, structure, n):
"""
Test MaxEnt and DeepMaxEnt on an objectworld with different numbers of paths.
grid_size: Grid size. int.
n_objects: Number of objects. int.
n_colours: Number of colours. int.
discrete: Whether the features should be discrete. bool.
l1: L1 regularisation. float.
l2: L2 regularisation. float.
epochs: MaxEnt iterations. int.
structure: Neural network structure tuple, e.g. (3, 3) would be a
3-layer neural network with assumed inputs.
n: Iterations. int.
-> (MaxEnt [(n_samples, mean expected value difference, stdev)],
DeepMaxEnt [(n_samples, mean expected value difference, stdev)]),
raw data (maxent_data, deep_maxent_data)
"""
maxent_data = []
deep_maxent_data = []
for n_samples in (32, 16, 8, 4):
t = time()
maxent_EVDs = []
deep_maxent_EVDs = []
for i in range(n):
print("{}: {}/{}".format(n_samples, i+1, n))
maxent_EVD, deep_maxent_EVD = test_ow_once(grid_size, n_objects,
n_colours, discrete, l1, l2, n_samples, epochs, structure)
maxent_EVDs.append(maxent_EVD)
deep_maxent_EVDs.append(deep_maxent_EVD)
print(maxent_EVD, deep_maxent_EVD)
stdout.flush()
maxent_data.append((n_samples, np.mean(maxent_EVDs),
np.median(maxent_EVDs), np.std(maxent_EVDs)))
deep_maxent_data.append((n_samples, np.mean(deep_maxent_EVDs),
np.median(deep_maxent_EVDs), np.std(deep_maxent_EVDs)))
print("{} (took {:.02}s)".format(n_samples, time() - t))
print("MaxEnt:", maxent_data)
print("DeepMaxEnt:", deep_maxent_data)
return maxent_data, deep_maxent_data
if __name__ == '__main__':
# Tests the 16 x 16 objectworld.
print(test_ow_over_samples(16, 25, 2, False, 0, 0, 150, (3, 3), 10))
# Tests the 32 x 32 objectworld.
print(test_ow_over_samples(32, 50, 2, False, 0, 0, 250, (3, 3), 5))
| mit |
DailyActie/Surrogate-Model | 01-codes/scipy-master/scipy/interpolate/interpolate.py | 1 | 101080 | """ Classes for interpolating values.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['interp1d', 'interp2d', 'spline', 'spleval', 'splmake', 'spltopp',
'ppform', 'lagrange', 'PPoly', 'BPoly', 'NdPPoly',
'RegularGridInterpolator', 'interpn']
import itertools
import warnings
import functools
import operator
import numpy as np
from numpy import (array, transpose, searchsorted, atleast_1d, atleast_2d,
dot, ravel, poly1d, asarray, intp)
import scipy.linalg
import scipy.special as spec
from scipy.special import comb
from scipy._lib.six import xrange, integer_types, string_types
from . import fitpack
from . import dfitpack
from . import _fitpack
from .polyint import _Interpolator1D
from . import _ppoly
from .fitpack2 import RectBivariateSpline
from .interpnd import _ndim_coords_from_arrays
from ._bsplines import make_interp_spline, BSpline
def prod(x):
"""Product of a list of numbers; ~40x faster vs np.prod for Python tuples"""
if len(x) == 0:
return 1
return functools.reduce(operator.mul, x)
def lagrange(x, w):
"""
Return a Lagrange interpolating polynomial.
Given two 1-D arrays `x` and `w,` returns the Lagrange interpolating
polynomial through the points ``(x, w)``.
Warning: This implementation is numerically unstable. Do not expect to
be able to use more than about 20 points even if they are chosen optimally.
Parameters
----------
x : array_like
`x` represents the x-coordinates of a set of datapoints.
w : array_like
`w` represents the y-coordinates of a set of datapoints, i.e. f(`x`).
Returns
-------
lagrange : numpy.poly1d instance
The Lagrange interpolating polynomial.
"""
M = len(x)
p = poly1d(0.0)
for j in xrange(M):
pt = poly1d(w[j])
for k in xrange(M):
if k == j:
continue
fac = x[j] - x[k]
pt *= poly1d([1.0, -x[k]]) / fac
p += pt
return p
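# Illustrative sketch (added; not part of scipy): interpolating three points
# with `lagrange` and evaluating the resulting poly1d object.
def _demo_lagrange():
    x = np.array([0.0, 1.0, 2.0])
    w = np.array([1.0, 3.0, 2.0])
    poly = lagrange(x, w)
    # the interpolant reproduces w at the nodes and fills in between them
    return poly(x), poly(0.5)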
# !! Need to find argument for keeping initialize. If it isn't
# !! found, get rid of it!
class interp2d(object):
"""
interp2d(x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=nan)
Interpolate over a 2-D grid.
`x`, `y` and `z` are arrays of values used to approximate some function
f: ``z = f(x, y)``. This class returns a function whose call method uses
spline interpolation to find the value of new points.
If `x` and `y` represent a regular grid, consider using
RectBivariateSpline.
Note that calling `interp2d` with NaNs present in input values results in
undefined behaviour.
Methods
-------
__call__
Parameters
----------
x, y : array_like
Arrays defining the data point coordinates.
If the points lie on a regular grid, `x` can specify the column
coordinates and `y` the row coordinates, for example::
>>> x = [0,1,2]; y = [0,3]; z = [[1,2,3], [4,5,6]]
Otherwise, `x` and `y` must specify the full coordinates for each
point, for example::
>>> x = [0,1,2,0,1,2]; y = [0,0,0,3,3,3]; z = [1,2,3,4,5,6]
If `x` and `y` are multi-dimensional, they are flattened before use.
z : array_like
The values of the function to interpolate at the data points. If
`z` is a multi-dimensional array, it is flattened before use. The
length of a flattened `z` array is either
len(`x`)*len(`y`) if `x` and `y` specify the column and row coordinates
or ``len(z) == len(x) == len(y)`` if `x` and `y` specify coordinates
for each point.
kind : {'linear', 'cubic', 'quintic'}, optional
The kind of spline interpolation to use. Default is 'linear'.
copy : bool, optional
If True, the class makes internal copies of x, y and z.
If False, references may be used. The default is to copy.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data (x,y), a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If omitted (None), values outside
the domain are extrapolated.
See Also
--------
RectBivariateSpline :
Much faster 2D interpolation if your input data is on a grid
bisplrep, bisplev :
Spline interpolation based on FITPACK
BivariateSpline : a more recent wrapper of the FITPACK routines
interp1d : one dimension version of this function
Notes
-----
The minimum number of data points required along the interpolation
axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for
quintic interpolation.
The interpolator is constructed by `bisplrep`, with a smoothing factor
of 0. If more control over smoothing is needed, `bisplrep` should be
used directly.
Examples
--------
Construct a 2-D grid and interpolate on it:
>>> from scipy import interpolate
>>> x = np.arange(-5.01, 5.01, 0.25)
>>> y = np.arange(-5.01, 5.01, 0.25)
>>> xx, yy = np.meshgrid(x, y)
>>> z = np.sin(xx**2+yy**2)
>>> f = interpolate.interp2d(x, y, z, kind='cubic')
Now use the obtained interpolation function and plot the result:
>>> import matplotlib.pyplot as plt
>>> xnew = np.arange(-5.01, 5.01, 1e-2)
>>> ynew = np.arange(-5.01, 5.01, 1e-2)
>>> znew = f(xnew, ynew)
>>> plt.plot(x, z[0, :], 'ro-', xnew, znew[0, :], 'b-')
>>> plt.show()
"""
def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=None):
x = ravel(x)
y = ravel(y)
z = asarray(z)
rectangular_grid = (z.size == len(x) * len(y))
if rectangular_grid:
if z.ndim == 2:
if z.shape != (len(y), len(x)):
raise ValueError("When on a regular grid with x.size = m "
"and y.size = n, if z.ndim == 2, then z "
"must have shape (n, m)")
if not np.all(x[1:] >= x[:-1]):
j = np.argsort(x)
x = x[j]
z = z[:, j]
if not np.all(y[1:] >= y[:-1]):
j = np.argsort(y)
y = y[j]
z = z[j, :]
z = ravel(z.T)
else:
z = ravel(z)
if len(x) != len(y):
raise ValueError(
"x and y must have equal lengths for non rectangular grid")
if len(z) != len(x):
raise ValueError(
"Invalid length for input z for non rectangular grid")
try:
kx = ky = {'linear': 1,
'cubic': 3,
'quintic': 5}[kind]
except KeyError:
raise ValueError("Unsupported interpolation type.")
if not rectangular_grid:
# TODO: surfit is really not meant for interpolation!
self.tck = fitpack.bisplrep(x, y, z, kx=kx, ky=ky, s=0.0)
else:
nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(
x, y, z, None, None, None, None,
kx=kx, ky=ky, s=0.0)
self.tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)],
kx, ky)
self.bounds_error = bounds_error
self.fill_value = fill_value
self.x, self.y, self.z = [array(a, copy=copy) for a in (x, y, z)]
self.x_min, self.x_max = np.amin(x), np.amax(x)
self.y_min, self.y_max = np.amin(y), np.amax(y)
def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):
"""Interpolate the function.
Parameters
----------
x : 1D array
x-coordinates of the mesh on which to interpolate.
y : 1D array
y-coordinates of the mesh on which to interpolate.
dx : int >= 0, < kx
Order of partial derivatives in x.
dy : int >= 0, < ky
Order of partial derivatives in y.
assume_sorted : bool, optional
If False, values of `x` and `y` can be in any order and they are
sorted first.
If True, `x` and `y` have to be arrays of monotonically
increasing values.
Returns
-------
z : 2D array with shape (len(y), len(x))
The interpolated values.
"""
x = atleast_1d(x)
y = atleast_1d(y)
if x.ndim != 1 or y.ndim != 1:
raise ValueError("x and y should both be 1-D arrays")
if not assume_sorted:
x = np.sort(x)
y = np.sort(y)
if self.bounds_error or self.fill_value is not None:
out_of_bounds_x = (x < self.x_min) | (x > self.x_max)
out_of_bounds_y = (y < self.y_min) | (y > self.y_max)
any_out_of_bounds_x = np.any(out_of_bounds_x)
any_out_of_bounds_y = np.any(out_of_bounds_y)
if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y):
raise ValueError("Values out of range; x must be in %r, y in %r"
% ((self.x_min, self.x_max),
(self.y_min, self.y_max)))
z = fitpack.bisplev(x, y, self.tck, dx, dy)
z = atleast_2d(z)
z = transpose(z)
if self.fill_value is not None:
if any_out_of_bounds_x:
z[:, out_of_bounds_x] = self.fill_value
if any_out_of_bounds_y:
z[out_of_bounds_y, :] = self.fill_value
if len(z) == 1:
z = z[0]
return array(z)
def _check_broadcast_up_to(arr_from, shape_to, name):
"""Helper to check that arr_from broadcasts up to shape_to"""
shape_from = arr_from.shape
if len(shape_to) >= len(shape_from):
for t, f in zip(shape_to[::-1], shape_from[::-1]):
if f != 1 and f != t:
break
else: # all checks pass, do the upcasting that we need later
if arr_from.size != 1 and arr_from.shape != shape_to:
arr_from = np.ones(shape_to, arr_from.dtype) * arr_from
return arr_from.ravel()
# at least one check failed
raise ValueError('%s argument must be able to broadcast up '
'to shape %s but had shape %s'
% (name, shape_to, shape_from))
def _do_extrapolate(fill_value):
"""Helper to check if fill_value == "extrapolate" without warnings"""
return (isinstance(fill_value, string_types) and
fill_value == 'extrapolate')
class interp1d(_Interpolator1D):
"""
Interpolate a 1-D function.
`x` and `y` are arrays of values used to approximate some function f:
``y = f(x)``. This class returns a function whose call method uses
interpolation to find the value of new points.
Note that calling `interp1d` with NaNs present in input values results in
undefined behaviour.
Parameters
----------
x : (N,) array_like
A 1-D array of real values.
y : (...,N,...) array_like
A N-D array of real values. The length of `y` along the interpolation
axis must be equal to the length of `x`.
kind : str or int, optional
Specifies the kind of interpolation as a string
('linear', 'nearest', 'zero', 'slinear', 'quadratic, 'cubic'
where 'slinear', 'quadratic' and 'cubic' refer to a spline
interpolation of first, second or third order) or as an integer
specifying the order of the spline interpolator to use.
Default is 'linear'.
axis : int, optional
Specifies the axis of `y` along which to interpolate.
Interpolation defaults to the last axis of `y`.
copy : bool, optional
If True, the class makes internal copies of x and y.
If False, references to `x` and `y` are used. The default is to copy.
bounds_error : bool, optional
If True, a ValueError is raised any time interpolation is attempted on
a value outside of the range of x (where extrapolation is
necessary). If False, out of bounds values are assigned `fill_value`.
By default, an error is raised unless `fill_value="extrapolate"`.
fill_value : array-like or (array-like, array_like) or "extrapolate", optional
- if a ndarray (or float), this value will be used to fill in for
requested points outside of the data range. If not provided, then
the default is NaN. The array-like must broadcast properly to the
dimensions of the non-interpolation axes.
- If a two-element tuple, then the first element is used as a
fill value for ``x_new < x[0]`` and the second element is used for
``x_new > x[-1]``. Anything that is not a 2-element tuple (e.g.,
list or ndarray, regardless of shape) is taken to be a single
array-like argument meant to be used for both bounds as
``below, above = fill_value, fill_value``.
.. versionadded:: 0.17.0
- If "extrapolate", then points outside the data range will be
extrapolated.
.. versionadded:: 0.17.0
assume_sorted : bool, optional
If False, values of `x` can be in any order and they are sorted first.
If True, `x` has to be an array of monotonically increasing values.
Methods
-------
__call__
See Also
--------
splrep, splev
Spline interpolation/smoothing based on FITPACK.
UnivariateSpline : An object-oriented wrapper of the FITPACK routines.
interp2d : 2-D interpolation
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import interpolate
>>> x = np.arange(0, 10)
>>> y = np.exp(-x/3.0)
>>> f = interpolate.interp1d(x, y)
>>> xnew = np.arange(0, 9, 0.1)
>>> ynew = f(xnew) # use interpolation function returned by `interp1d`
>>> plt.plot(x, y, 'o', xnew, ynew, '-')
>>> plt.show()
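A sketch of the two-element ``fill_value`` behaviour (the fill numbers
-1 and -2 are arbitrary): values below ``x[0]`` get the first element,
values above ``x[-1]`` get the second.
>>> f2 = interpolate.interp1d(x, y, bounds_error=False, fill_value=(-1.0, -2.0))
>>> f2([-1, 20])
array([-1., -2.])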
"""
def __init__(self, x, y, kind='linear', axis=-1,
copy=True, bounds_error=None, fill_value=np.nan,
assume_sorted=False):
""" Initialize a 1D linear interpolation class."""
_Interpolator1D.__init__(self, x, y, axis=axis)
self.bounds_error = bounds_error # used by fill_value setter
self.copy = copy
if kind in ['zero', 'slinear', 'quadratic', 'cubic']:
order = {'nearest': 0, 'zero': 0, 'slinear': 1,
'quadratic': 2, 'cubic': 3}[kind]
kind = 'spline'
elif isinstance(kind, int):
order = kind
kind = 'spline'
elif kind not in ('linear', 'nearest'):
raise NotImplementedError("%s is unsupported: Use fitpack "
"routines for other types." % kind)
x = array(x, copy=self.copy)
y = array(y, copy=self.copy)
if not assume_sorted:
ind = np.argsort(x)
x = x[ind]
y = np.take(y, ind, axis=axis)
if x.ndim != 1:
raise ValueError("the x array must have exactly one dimension.")
if y.ndim == 0:
raise ValueError("the y array must have at least one dimension.")
# Force-cast y to a floating-point type, if it's not yet one
if not issubclass(y.dtype.type, np.inexact):
y = y.astype(np.float_)
# Backward compatibility
self.axis = axis % y.ndim
# Interpolation goes internally along the first axis
self.y = y
self._y = self._reshape_yi(self.y)
self.x = x
del y, x # clean up namespace to prevent misuse; use attributes
self._kind = kind
self.fill_value = fill_value  # calls the setter; it can modify bounds_error
# Adjust to interpolation kind; store reference to *unbound*
# interpolation methods, in order to avoid circular references to self
# stored in the bound instance methods, and therefore delayed garbage
# collection. See: http://docs.python.org/2/reference/datamodel.html
if kind in ('linear', 'nearest'):
# Make a "view" of the y array that is rotated to the interpolation
# axis.
minval = 2
if kind == 'nearest':
# Do division before addition to prevent possible integer
# overflow
self.x_bds = self.x / 2.0
self.x_bds = self.x_bds[1:] + self.x_bds[:-1]
self._call = self.__class__._call_nearest
else:
# Check if we can delegate to numpy.interp (2x-10x faster).
cond = self.x.dtype == np.float_ and self.y.dtype == np.float_
cond = cond and self.y.ndim == 1
cond = cond and not _do_extrapolate(fill_value)
if cond:
self._call = self.__class__._call_linear_np
else:
self._call = self.__class__._call_linear
else:
minval = order + 1
rewrite_nan = False
xx, yy = self.x, self._y
if order > 1:
# Quadratic or cubic spline. If input contains even a single
# nan, then the output is all nans. We cannot just feed data
# with nans to make_interp_spline because it calls LAPACK.
# So, we make up a bogus x and y with no nans and use it
# to get the correct shape of the output, which we then fill
# with nans.
# For slinear or zero order spline, we just pass nans through.
if np.isnan(self.x).any():
xx = np.linspace(min(self.x), max(self.x), len(self.x))
rewrite_nan = True
if np.isnan(self._y).any():
yy = np.ones_like(self._y)
rewrite_nan = True
self._spline = make_interp_spline(xx, yy, k=order,
check_finite=False)
if rewrite_nan:
self._call = self.__class__._call_nan_spline
else:
self._call = self.__class__._call_spline
if len(self.x) < minval:
raise ValueError("x and y arrays must have at "
"least %d entries" % minval)
@property
def fill_value(self):
# backwards compat: mimic a public attribute
return self._fill_value_orig
@fill_value.setter
def fill_value(self, fill_value):
# extrapolation only works for nearest neighbor and linear methods
if _do_extrapolate(fill_value):
if self.bounds_error:
raise ValueError("Cannot extrapolate and raise "
"at the same time.")
self.bounds_error = False
self._extrapolate = True
else:
broadcast_shape = (self.y.shape[:self.axis] +
self.y.shape[self.axis + 1:])
if len(broadcast_shape) == 0:
broadcast_shape = (1,)
# it's either a pair (_below_range, _above_range) or a single value
# for both above and below range
if isinstance(fill_value, tuple) and len(fill_value) == 2:
below_above = [np.asarray(fill_value[0]),
np.asarray(fill_value[1])]
names = ('fill_value (below)', 'fill_value (above)')
for ii in range(2):
below_above[ii] = _check_broadcast_up_to(
below_above[ii], broadcast_shape, names[ii])
else:
fill_value = np.asarray(fill_value)
below_above = [_check_broadcast_up_to(
fill_value, broadcast_shape, 'fill_value')] * 2
self._fill_value_below, self._fill_value_above = below_above
self._extrapolate = False
if self.bounds_error is None:
self.bounds_error = True
# backwards compat: fill_value was a public attr; make it writeable
self._fill_value_orig = fill_value
def _call_linear_np(self, x_new):
# Note that out-of-bounds values are taken care of in self._evaluate
return np.interp(x_new, self.x, self.y)
def _call_linear(self, x_new):
# 2. Find where in the original data the values to interpolate
# would be inserted.
# Note: If x_new[n] == x[m], then m is returned by searchsorted.
x_new_indices = searchsorted(self.x, x_new)
# 3. Clip x_new_indices so that they are within the range of
# self.x indices and at least 1. Removes mis-interpolation
# of x_new[n] = x[0]
x_new_indices = x_new_indices.clip(1, len(self.x) - 1).astype(int)
# 4. Calculate the slope of regions that each x_new value falls in.
lo = x_new_indices - 1
hi = x_new_indices
x_lo = self.x[lo]
x_hi = self.x[hi]
y_lo = self._y[lo]
y_hi = self._y[hi]
# Note that the following two expressions rely on the specifics of the
# broadcasting semantics.
slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
# 5. Calculate the actual value for each entry in x_new.
y_new = slope * (x_new - x_lo)[:, None] + y_lo
return y_new
def _call_nearest(self, x_new):
""" Find nearest neighbour interpolated y_new = f(x_new)."""
# 2. Find where in the averaged data the values to interpolate
# would be inserted.
# Note: use side='left' (right) to searchsorted() to define the
# halfway point to be nearest to the left (right) neighbour
x_new_indices = searchsorted(self.x_bds, x_new, side='left')
# 3. Clip x_new_indices so that they are within the range of x indices.
x_new_indices = x_new_indices.clip(0, len(self.x) - 1).astype(intp)
# 4. Calculate the actual value for each entry in x_new.
y_new = self._y[x_new_indices]
return y_new
def _call_spline(self, x_new):
return self._spline(x_new)
def _call_nan_spline(self, x_new):
out = self._spline(x_new)
out[...] = np.nan
return out
def _evaluate(self, x_new):
# 1. Handle values in x_new that are outside of x. Throw error,
# or return mask arrays indicating the out-of-bounds values.
# The behavior is set by the bounds_error variable.
x_new = asarray(x_new)
y_new = self._call(self, x_new)
if not self._extrapolate:
below_bounds, above_bounds = self._check_bounds(x_new)
if len(y_new) > 0:
# Note fill_value must be broadcast up to the proper size
# and flattened to work here
y_new[below_bounds] = self._fill_value_below
y_new[above_bounds] = self._fill_value_above
return y_new
def _check_bounds(self, x_new):
"""Check the inputs for being in the bounds of the interpolated data.
Parameters
----------
x_new : array
Returns
-------
below_bounds, above_bounds : bool arrays
Masks on `x_new` marking the values below and above the bounds, respectively.
"""
# If self.bounds_error is True, we raise an error if any x_new values
# fall outside the range of x. Otherwise, we return an array indicating
# which values are outside the boundary region.
below_bounds = x_new < self.x[0]
above_bounds = x_new > self.x[-1]
# !! Could provide more information about which values are out of bounds
if self.bounds_error and below_bounds.any():
raise ValueError("A value in x_new is below the interpolation "
"range.")
if self.bounds_error and above_bounds.any():
raise ValueError("A value in x_new is above the interpolation "
"range.")
# !! Should we emit a warning if some values are out of bounds?
# !! matlab does not.
return below_bounds, above_bounds
class _PPolyBase(object):
"""Base class for piecewise polynomials."""
__slots__ = ('c', 'x', 'extrapolate', 'axis')
def __init__(self, c, x, extrapolate=None, axis=0):
self.c = np.asarray(c)
self.x = np.ascontiguousarray(x, dtype=np.float64)
if extrapolate is None:
extrapolate = True
elif extrapolate != 'periodic':
extrapolate = bool(extrapolate)
self.extrapolate = extrapolate
if not (0 <= axis < self.c.ndim - 1):
raise ValueError("%s must be between 0 and %s" % (axis, c.ndim - 1))
self.axis = axis
if axis != 0:
# roll the interpolation axis to be the first one in self.c
# More specifically, the target shape for self.c is (k, m, ...),
# and axis !=0 means that we have c.shape (..., k, m, ...)
# ^
# axis
# So we roll two of them.
self.c = np.rollaxis(self.c, axis + 1)
self.c = np.rollaxis(self.c, axis + 1)
if self.x.ndim != 1:
raise ValueError("x must be 1-dimensional")
if self.x.size < 2:
raise ValueError("at least 2 breakpoints are needed")
if self.c.ndim < 2:
raise ValueError("c must have at least 2 dimensions")
if self.c.shape[0] == 0:
raise ValueError("polynomial must be at least of order 0")
if self.c.shape[1] != self.x.size - 1:
raise ValueError("number of coefficients != len(x)-1")
dx = np.diff(self.x)
if not (np.all(dx >= 0) or np.all(dx <= 0)):
raise ValueError("`x` must be strictly increasing or decreasing.")
dtype = self._get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dtype)
def _get_dtype(self, dtype):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.c.dtype, np.complexfloating):
return np.complex_
else:
return np.float_
@classmethod
def construct_fast(cls, c, x, extrapolate=None, axis=0):
"""
Construct the piecewise polynomial without making checks.
Takes the same parameters as the constructor. Input arguments
`c` and `x` must be arrays of the correct shape and type. The
`c` array can only be of dtypes float and complex, and `x`
array must have dtype float.
"""
self = object.__new__(cls)
self.c = c
self.x = x
self.axis = axis
if extrapolate is None:
extrapolate = True
self.extrapolate = extrapolate
return self
def _ensure_c_contiguous(self):
"""
c and x may be modified by the user. The Cython code expects
that they are C contiguous.
"""
if not self.x.flags.c_contiguous:
self.x = self.x.copy()
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
def extend(self, c, x, right=None):
"""
Add additional breakpoints and coefficients to the polynomial.
Parameters
----------
c : ndarray, size (k, m, ...)
Additional coefficients for polynomials in intervals. Note that
the first additional interval will be formed using one of the
`self.x` end points.
x : ndarray, size (m,)
Additional breakpoints. Must be sorted in the same order as
`self.x` and either to the right or to the left of the current
breakpoints.
right
Deprecated argument. Has no effect.
.. deprecated:: 0.19
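Examples
--------
A minimal sketch (coefficients chosen for illustration): extend
``f(x) = x`` on ``[0, 1]`` by the constant ``1`` on ``[1, 2]``.
>>> from scipy.interpolate import PPoly
>>> p = PPoly(np.array([[1.0], [0.0]]), [0, 1])
>>> p.extend(np.array([[0.0], [1.0]]), np.array([2.0]))
>>> p.x.tolist()
[0.0, 1.0, 2.0]
>>> float(p(1.5))
1.0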
"""
if right is not None:
warnings.warn("`right` is deprecated and will be removed.")
c = np.asarray(c)
x = np.asarray(x)
if c.ndim < 2:
raise ValueError("invalid dimensions for c")
if x.ndim != 1:
raise ValueError("invalid dimensions for x")
if x.shape[0] != c.shape[1]:
raise ValueError("x and c have incompatible sizes")
if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim:
raise ValueError("c and self.c have incompatible shapes")
if c.size == 0:
return
dx = np.diff(x)
if not (np.all(dx >= 0) or np.all(dx <= 0)):
raise ValueError("`x` is not sorted.")
if self.x[-1] >= self.x[0]:
if not x[-1] >= x[0]:
raise ValueError("`x` is in the different order "
"than `self.x`.")
if x[0] >= self.x[-1]:
action = 'append'
elif x[-1] <= self.x[0]:
action = 'prepend'
else:
raise ValueError("`x` is neither on the left or on the right "
"from `self.x`.")
else:
if not x[-1] <= x[0]:
raise ValueError("`x` is in the different order "
"than `self.x`.")
if x[0] <= self.x[-1]:
action = 'append'
elif x[-1] >= self.x[0]:
action = 'prepend'
else:
raise ValueError("`x` is neither on the left or on the right "
"from `self.x`.")
dtype = self._get_dtype(c.dtype)
k2 = max(c.shape[0], self.c.shape[0])
c2 = np.zeros((k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:],
dtype=dtype)
if action == 'append':
c2[k2 - self.c.shape[0]:, :self.c.shape[1]] = self.c
c2[k2 - c.shape[0]:, self.c.shape[1]:] = c
self.x = np.r_[self.x, x]
elif action == 'prepend':
c2[k2 - self.c.shape[0]:, :c.shape[1]] = c
c2[k2 - c.shape[0]:, c.shape[1]:] = self.c
self.x = np.r_[x, self.x]
self.c = c2
def __call__(self, x, nu=0, extrapolate=None):
"""
Evaluate the piecewise polynomial or its derivative.
Parameters
----------
x : array_like
Points to evaluate the interpolant at.
nu : int, optional
Order of derivative to evaluate. Must be non-negative.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used.
If None (default), use `self.extrapolate`.
Returns
-------
y : array_like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if extrapolate is None:
extrapolate = self.extrapolate
x = np.asarray(x)
x_shape, x_ndim = x.shape, x.ndim
x = np.ascontiguousarray(x.ravel(), dtype=np.float_)
# With periodic extrapolation we map x to the segment
# [self.x[0], self.x[-1]].
if extrapolate == 'periodic':
x = self.x[0] + (x - self.x[0]) % (self.x[-1] - self.x[0])
extrapolate = False
out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype)
self._ensure_c_contiguous()
self._evaluate(x, nu, extrapolate, out)
out = out.reshape(x_shape + self.c.shape[2:])
if self.axis != 0:
# transpose to move the calculated values to the interpolation axis
l = list(range(out.ndim))
l = l[x_ndim:x_ndim + self.axis] + l[:x_ndim] + l[x_ndim + self.axis:]
out = out.transpose(l)
return out
class PPoly(_PPolyBase):
"""
Piecewise polynomial in terms of coefficients and breakpoints
The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
local power basis::
S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1))
where ``k`` is the degree of the polynomial.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. Must be sorted in either increasing or
decreasing order.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
derivative
antiderivative
integrate
solve
roots
extend
from_spline
from_bernstein_basis
construct_fast
See also
--------
BPoly : piecewise polynomials in the Bernstein basis
Notes
-----
High-order polynomials in the power basis can be numerically
unstable. Precision problems can start to appear for orders
larger than 20-30.
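Examples
--------
A minimal sketch of the local power basis: one interval ``[0, 1]``
with ``S(x) = 2*(x - 0) + 1`` (coefficients chosen for illustration).
>>> from scipy.interpolate import PPoly
>>> p = PPoly(np.array([[2.0], [1.0]]), [0, 1])
>>> float(p(0.5))
2.0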
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. Default is 1, i.e. compute the
first derivative. If negative, the antiderivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k - n representing the derivative
of this polynomial.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
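Examples
--------
A minimal sketch (the derivative of ``2*(x - 0) + 1`` is the constant 2):
>>> from scipy.interpolate import PPoly
>>> p = PPoly(np.array([[2.0], [1.0]]), [0, 1])
>>> float(p.derivative()(0.5))
2.0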
"""
if nu < 0:
return self.antiderivative(-nu)
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
c2 = self.c[:-nu, :].copy()
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# multiply by the correct rising factorials
factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu)
c2 *= factor[(slice(None),) + (None,) * (c2.ndim - 1)]
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
Antiderivative is also the indefinite integral of the function,
and derivative is its inverse operation.
Parameters
----------
nu : int, optional
Order of antiderivative to evaluate. Default is 1, i.e. compute
the first integral. If negative, the derivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k + n representing
the antiderivative of this polynomial.
Notes
-----
The antiderivative returned by this function is continuous and
continuously differentiable to order n-1, up to floating point
rounding error.
If antiderivative is computed and ``self.extrapolate='periodic'``,
it will be set to False for the returned instance. This is done because
the antiderivative is no longer periodic and its correct evaluation
outside of the initially given x interval is difficult.
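Examples
--------
A minimal sketch: the antiderivative of the constant 2 vanishes at
``x[0]``, so its value at 1 is 2.
>>> from scipy.interpolate import PPoly
>>> p = PPoly(np.array([[2.0]]), [0, 1])
>>> float(p.antiderivative()(1.0))
2.0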
"""
if nu <= 0:
return self.derivative(-nu)
c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:],
dtype=self.c.dtype)
c[:-nu] = self.c
# divide by the correct rising factorials
factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu)
c[:-nu] /= factor[(slice(None),) + (None,) * (c.ndim - 1)]
# fix continuity of added degrees of freedom
self._ensure_c_contiguous()
_ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1),
self.x, nu - 1)
if self.extrapolate == 'periodic':
extrapolate = False
else:
extrapolate = self.extrapolate
# construct a compatible polynomial
return self.construct_fast(c, self.x, extrapolate, self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used.
If None (default), use `self.extrapolate`.
Returns
-------
ig : array_like
Definite integral of the piecewise polynomial over [a, b]
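Examples
--------
A minimal sketch (the integral of ``f(x) = x`` over ``[0, 2]`` is 2):
>>> from scipy.interpolate import PPoly
>>> p = PPoly(np.array([[1.0], [0.0]]), [0, 2])
>>> float(p.integrate(0, 2))
2.0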
"""
if extrapolate is None:
extrapolate = self.extrapolate
# Swap integration bounds if needed
sign = 1
if b < a:
a, b = b, a
sign = -1
range_int = np.empty((prod(self.c.shape[2:]),), dtype=self.c.dtype)
self._ensure_c_contiguous()
# Compute the integral.
if extrapolate == 'periodic':
# Split the integral into the part over period (can be several
# of them) and the remaining part.
xs, xe = self.x[0], self.x[-1]
period = xe - xs
interval = b - a
n_periods, left = divmod(interval, period)
if n_periods > 0:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, xs, xe, False, out=range_int)
range_int *= n_periods
else:
range_int.fill(0)
# Map a to [xs, xe], b is always a + left.
a = xs + (a - xs) % period
b = a + left
# If b <= xe then we need to integrate over [a, b], otherwise
# over [a, xe] and then from xs over the remainder.
remainder_int = np.empty_like(range_int)
if b <= xe:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, b, False, out=remainder_int)
range_int += remainder_int
else:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, xe, False, out=remainder_int)
range_int += remainder_int
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, xs, xs + left + a - xe, False, out=remainder_int)
range_int += remainder_int
else:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, b, bool(extrapolate), out=range_int)
# Return
range_int *= sign
return range_int.reshape(self.c.shape[2:])
def solve(self, y=0., discontinuity=True, extrapolate=None):
"""
Find real solutions of the equation ``pp(x) == y``.
Parameters
----------
y : float, optional
Right-hand side. Default is zero.
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to return roots from the polynomial
extrapolated based on first and last intervals, 'periodic' works
the same as False. If None (default), use `self.extrapolate`.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
If the PPoly object describes multiple polynomials, the
return value is an object array in which each element is an
ndarray containing the roots.
Notes
-----
This routine works only on real-valued polynomials.
If the piecewise polynomial contains sections that are
identically zero, the root list will contain the start point
of the corresponding interval, followed by a ``nan`` value.
If the polynomial is discontinuous across a breakpoint, and
there is a sign change across the breakpoint, this is reported
if the `discontinuity` parameter is True.
Examples
--------
Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals
``[-2, 1], [1, 2]``:
>>> from scipy.interpolate import PPoly
>>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2])
>>> pp.roots()
array([-1., 1.])
"""
if extrapolate is None:
extrapolate = self.extrapolate
self._ensure_c_contiguous()
if np.issubdtype(self.c.dtype, np.complexfloating):
raise ValueError("Root finding is only for "
"real-valued polynomials")
y = float(y)
r = _ppoly.real_roots(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, y, bool(discontinuity),
bool(extrapolate))
if self.c.ndim == 2:
return r[0]
else:
r2 = np.empty(prod(self.c.shape[2:]), dtype=object)
# this for-loop is equivalent to ``r2[...] = r``, but that's broken
# in numpy 1.6.0
for ii, root in enumerate(r):
r2[ii] = root
return r2.reshape(self.c.shape[2:])
def roots(self, discontinuity=True, extrapolate=None):
"""
Find real roots of the piecewise polynomial.
Parameters
----------
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to return roots from the polynomial
extrapolated based on first and last intervals, 'periodic' works
the same as False. If None (default), use `self.extrapolate`.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
If the PPoly object describes multiple polynomials, the
return value is an object array in which each element is an
ndarray containing the roots.
See Also
--------
PPoly.solve
"""
return self.solve(0, discontinuity, extrapolate)
@classmethod
def from_spline(cls, tck, extrapolate=None):
"""
Construct a piecewise polynomial from a spline
Parameters
----------
tck
A spline, as returned by `splrep` or a BSpline object.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
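Examples
--------
A minimal sketch converting a `splrep` cubic spline (the data is
illustrative only):
>>> from scipy.interpolate import splrep, PPoly
>>> x = np.linspace(0, 10, 11)
>>> tck = splrep(x, np.sin(x))
>>> p = PPoly.from_spline(tck)
>>> p.c.shape[0]        # cubic pieces carry 4 coefficients each
4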
"""
if isinstance(tck, BSpline):
t, c, k = tck.tck
if extrapolate is None:
extrapolate = tck.extrapolate
else:
t, c, k = tck
cvals = np.empty((k + 1, len(t) - 1), dtype=c.dtype)
for m in xrange(k, -1, -1):
y = fitpack.splev(t[:-1], tck, der=m)
cvals[k - m, :] = y / spec.gamma(m + 1)
return cls.construct_fast(cvals, t, extrapolate)
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
"""
Construct a piecewise polynomial in the power basis
from a polynomial in Bernstein basis.
Parameters
----------
bp : BPoly
A Bernstein basis polynomial, as created by BPoly
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
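Examples
--------
A minimal sketch: ``f(x) = x`` on ``[0, 1]`` has Bernstein coefficients
``[0, 1]``, and the converted power-basis polynomial evaluates to the
same values.
>>> from scipy.interpolate import BPoly, PPoly
>>> bp = BPoly(np.array([[0.0], [1.0]]), [0, 1])
>>> p = PPoly.from_bernstein_basis(bp)
>>> float(p(0.25))
0.25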
"""
dx = np.diff(bp.x)
k = bp.c.shape[0] - 1 # polynomial order
rest = (None,) * (bp.c.ndim - 2)
c = np.zeros_like(bp.c)
for a in range(k + 1):
factor = (-1) ** a * comb(k, a) * bp.c[a]
for s in range(a, k + 1):
val = comb(k - a, s - a) * (-1) ** s
c[k - s] += factor * val / dx[(slice(None),) + rest] ** s
if extrapolate is None:
extrapolate = bp.extrapolate
return cls.construct_fast(c, bp.x, extrapolate, bp.axis)
class BPoly(_PPolyBase):
"""Piecewise polynomial in terms of coefficients and breakpoints.
The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
Bernstein polynomial basis::
S = sum(c[a, i] * b(a, k; x) for a in range(k+1)),
where ``k`` is the degree of the polynomial, and::
b(a, k; x) = binom(k, a) * t**a * (1 - t)**(k - a),
with ``t = (x - x[i]) / (x[i+1] - x[i])`` and ``binom`` is the binomial
coefficient.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. Must be sorted in either increasing or
decreasing order.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
extend
derivative
antiderivative
integrate
construct_fast
from_power_basis
from_derivatives
See also
--------
PPoly : piecewise polynomials in the power basis
Notes
-----
Properties of Bernstein polynomials are well documented in the literature.
Here's a non-exhaustive list:
.. [1] http://en.wikipedia.org/wiki/Bernstein_polynomial
.. [2] Kenneth I. Joy, Bernstein polynomials,
http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf
.. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems,
vol 2011, article ID 829546, :doi:`10.1155/2011/829543`.
Examples
--------
>>> from scipy.interpolate import BPoly
>>> x = [0, 1]
>>> c = [[1], [2], [3]]
>>> bp = BPoly(c, x)
This creates a 2nd order polynomial
.. math::
B(x) = 1 \\times b_{0, 2}(x) + 2 \\times b_{1, 2}(x) + 3 \\times b_{2, 2}(x) \\\\
= 1 \\times (1-x)^2 + 2 \\times 2 x (1 - x) + 3 \\times x^2
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate_bernstein(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. Default is 1, i.e. compute the
first derivative. If negative, the antiderivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k - nu representing the derivative of
this polynomial.
"""
if nu < 0:
return self.antiderivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.derivative()
return bp
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
# For a polynomial
# B(x) = \sum_{a=0}^{k} c_a b_{a, k}(x),
# we use the fact that
# b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ),
# which leads to
# B'(x) = \sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1}
#
# finally, for an interval [y, y + dy] with dy != 1,
# we need to correct for an extra power of dy
rest = (None,) * (self.c.ndim - 2)
k = self.c.shape[0] - 1
dx = np.diff(self.x)[(None, slice(None)) + rest]
c2 = k * np.diff(self.c, axis=0) / dx
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
Parameters
----------
nu : int, optional
Order of antiderivative to evaluate. Default is 1, i.e. compute
the first integral. If negative, the derivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k + nu representing the
antiderivative of this polynomial.
Notes
-----
If antiderivative is computed and ``self.extrapolate='periodic'``,
it will be set to False for the returned instance. This is done because
the antiderivative is no longer periodic and its correct evaluation
outside of the initially given x interval is difficult.
"""
if nu <= 0:
return self.derivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.antiderivative()
return bp
# Construct the indefinite integrals on individual intervals
c, x = self.c, self.x
k = c.shape[0]
c2 = np.zeros((k + 1,) + c.shape[1:], dtype=c.dtype)
c2[1:, ...] = np.cumsum(c, axis=0) / k
delta = x[1:] - x[:-1]
c2 *= delta[(None, slice(None)) + (None,) * (c.ndim - 2)]
# Now fix continuity: on the very first interval, take the integration
# constant to be zero; on an interval [x_j, x_{j+1}) with j>0,
# the integration constant is then equal to the jump of the `bp` at x_j.
# The latter is given by the coefficient of B_{n+1, n+1}
# *on the previous interval* (other B. polynomials are zero at the
# breakpoint). Finally, use the fact that BPs form a partition of unity.
c2[:, 1:] += np.cumsum(c2[k, :], axis=0)[:-1]
if self.extrapolate == 'periodic':
extrapolate = False
else:
extrapolate = self.extrapolate
return self.construct_fast(c2, x, extrapolate, axis=self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : {bool, 'periodic', None}, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. If 'periodic', periodic
extrapolation is used. If None (default), use `self.extrapolate`.
Returns
-------
array_like
Definite integral of the piecewise polynomial over [a, b]
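Examples
--------
A minimal sketch: with all Bernstein coefficients equal to 1 the
polynomial is identically 1 (partition of unity), so its integral over
``[0, 1]`` is 1.
>>> from scipy.interpolate import BPoly
>>> bp = BPoly(np.array([[1.0], [1.0], [1.0]]), [0, 1])
>>> float(bp.integrate(0, 1))
1.0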
"""
# XXX: can probably use instead the fact that
# \int_0^{1} B_{j, n}(x) \dx = 1/(n+1)
ib = self.antiderivative()
if extrapolate is None:
extrapolate = self.extrapolate
# ib.extrapolate shouldn't be 'periodic', it is converted to
# False for 'periodic' in the antiderivative() call.
if extrapolate != 'periodic':
ib.extrapolate = extrapolate
if extrapolate == 'periodic':
# Split the integral into the part over period (can be several
# of them) and the remaining part.
# For simplicity and clarity convert to a <= b case.
if a <= b:
sign = 1
else:
a, b = b, a
sign = -1
xs, xe = self.x[0], self.x[-1]
period = xe - xs
interval = b - a
n_periods, left = divmod(interval, period)
res = n_periods * (ib(xe) - ib(xs))
# Map a and b to [xs, xe].
a = xs + (a - xs) % period
b = a + left
# If b <= xe then we need to integrate over [a, b], otherwise
# over [a, xe] and then from xs over the remainder.
if b <= xe:
res += ib(b) - ib(a)
else:
res += ib(xe) - ib(a) + ib(xs + left + a - xe) - ib(xs)
return sign * res
else:
return ib(b) - ib(a)
def extend(self, c, x, right=None):
k = max(self.c.shape[0], c.shape[0])
self.c = self._raise_degree(self.c, k - self.c.shape[0])
c = self._raise_degree(c, k - c.shape[0])
return _PPolyBase.extend(self, c, x, right)
extend.__doc__ = _PPolyBase.extend.__doc__
@classmethod
def from_power_basis(cls, pp, extrapolate=None):
"""
Construct a piecewise polynomial in Bernstein basis
from a power basis polynomial.
Parameters
----------
pp : PPoly
A piecewise polynomial in the power basis
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
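Examples
--------
A minimal sketch (convert ``f(x) = x`` on ``[0, 1]`` from the power basis):
>>> from scipy.interpolate import PPoly, BPoly
>>> p = PPoly(np.array([[1.0], [0.0]]), [0, 1])
>>> bp = BPoly.from_power_basis(p)
>>> float(bp(0.25))
0.25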
"""
dx = np.diff(pp.x)
k = pp.c.shape[0] - 1 # polynomial order
rest = (None,) * (pp.c.ndim - 2)
c = np.zeros_like(pp.c)
for a in range(k + 1):
factor = pp.c[a] / comb(k, k - a) * dx[(slice(None),) + rest] ** (k - a)
for j in range(k - a, k + 1):
c[j] += factor * comb(j, k - a)
if extrapolate is None:
extrapolate = pp.extrapolate
return cls.construct_fast(c, pp.x, extrapolate, pp.axis)
@classmethod
def from_derivatives(cls, xi, yi, orders=None, extrapolate=None):
"""Construct a piecewise polynomial in the Bernstein basis,
compatible with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1D array of x-coordinates
yi : array_like or list of array_likes
``yi[i][j]`` is the ``j``-th derivative known at ``xi[i]``
orders : None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
Notes
-----
If ``k`` derivatives are specified at a breakpoint ``x``, the
constructed polynomial is exactly ``k`` times continuously
differentiable at ``x``, unless the ``order`` is provided explicitly.
In the latter case, the smoothness of the polynomial at
the breakpoint is controlled by the ``order``.
Deduces the number of derivatives to match at each end
from ``order`` and the number of derivatives available. If
possible it uses the same number of derivatives from
each end; if the number is odd it tries to take the
extra one from y2. In any case if not enough derivatives
are available at one end or another it draws enough to
make up the total from the other end.
If the order is too high and not enough derivatives are available,
an exception is raised.
Examples
--------
>>> from scipy.interpolate import BPoly
>>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])
Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]`
such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4`
>>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]])
Creates a piecewise polynomial `f(x)`, such that
`f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`.
Based on the number of derivatives provided, the order of the
local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`.
Notice that no restriction is imposed on the derivatives at
`x = 1` and `x = 2`.
Indeed, the explicit form of the polynomial is::
f(x) = | x * (1 - x), 0 <= x < 1
| 2 * (x - 1), 1 <= x <= 2
So that f'(1-0) = -1 and f'(1+0) = 2
"""
xi = np.asarray(xi)
if len(xi) != len(yi):
raise ValueError("xi and yi need to have the same length")
if np.any(xi[1:] - xi[:-1] <= 0):
raise ValueError("x coordinates are not in increasing order")
# number of intervals
m = len(xi) - 1
# global poly order is k-1, local orders are <=k and can vary
try:
k = max(len(yi[i]) + len(yi[i + 1]) for i in range(m))
except TypeError:
raise ValueError("Using a 1D array for y? Please .reshape(-1, 1).")
if orders is None:
orders = [None] * m
else:
if isinstance(orders, (integer_types, np.integer)):
orders = [orders] * m
k = max(k, max(orders))
if any(o <= 0 for o in orders):
raise ValueError("Orders must be positive.")
c = []
for i in range(m):
y1, y2 = yi[i], yi[i + 1]
if orders[i] is None:
n1, n2 = len(y1), len(y2)
else:
n = orders[i] + 1
n1 = min(n // 2, len(y1))
n2 = min(n - n1, len(y2))
n1 = min(n - n2, len(y1))
if n1 + n2 != n:
mesg = ("Point %g has %d derivatives, point %g"
" has %d derivatives, but order %d requested" % (
xi[i], len(y1), xi[i + 1], len(y2), orders[i]))
raise ValueError(mesg)
if not (n1 <= len(y1) and n2 <= len(y2)):
raise ValueError("`order` input incompatible with"
" length y1 or y2.")
b = BPoly._construct_from_derivatives(xi[i], xi[i + 1],
y1[:n1], y2[:n2])
if len(b) < k:
b = BPoly._raise_degree(b, k - len(b))
c.append(b)
c = np.asarray(c)
return cls(c.swapaxes(0, 1), xi, extrapolate)
@staticmethod
def _construct_from_derivatives(xa, xb, ya, yb):
"""Compute the coefficients of a polynomial in the Bernstein basis
given the values and derivatives at the edges.
Return the coefficients of a polynomial in the Bernstein basis
defined on `[xa, xb]` and having the values and derivatives at the
endpoints ``xa`` and ``xb`` as specified by ``ya`` and ``yb``.
The polynomial constructed is of the minimal possible degree, i.e.,
if the lengths of ``ya`` and ``yb`` are ``na`` and ``nb``, the degree
of the polynomial is ``na + nb - 1``.
Parameters
----------
xa : float
Left-hand end point of the interval
xb : float
Right-hand end point of the interval
ya : array_like
Derivatives at ``xa``. ``ya[0]`` is the value of the function, and
``ya[i]`` for ``i > 0`` is the value of the ``i``-th derivative.
yb : array_like
Derivatives at ``xb``.
Returns
-------
array
coefficient array of a polynomial having specified derivatives
Notes
-----
This uses several properties of Bernstein basis functions.
First of all,
.. math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1})
If B(x) is a linear combination of the form
.. math:: B(x) = \sum_{a=0}^{n} c_a b_{a, n},
then :math: B'(x) = n \sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}.
Iterating the latter one, one finds for the q-th derivative
.. math:: B^{q}(x) = n!/(n-q)! \sum_{a=0}^{n-q} Q_a b_{a, n-q},
with
.. math:: Q_a = \sum_{j=0}^{q} (-)^{j+q} comb(q, j) c_{j+a}
This way, only `a=0` contributes to :math: `B^{q}(x = xa)`, and
`c_q` are found one by one by iterating `q = 0, ..., na`.
At `x = xb` it's the same with `a = n - q`.
"""
ya, yb = np.asarray(ya), np.asarray(yb)
if ya.shape[1:] != yb.shape[1:]:
raise ValueError('ya and yb have incompatible dimensions.')
dta, dtb = ya.dtype, yb.dtype
if (np.issubdtype(dta, np.complexfloating) or
np.issubdtype(dtb, np.complexfloating)):
dt = np.complex_
else:
dt = np.float_
na, nb = len(ya), len(yb)
n = na + nb
c = np.empty((na + nb,) + ya.shape[1:], dtype=dt)
# compute coefficients of a polynomial degree na+nb-1
# walk left-to-right
for q in range(0, na):
c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa) ** q
for j in range(0, q):
c[q] -= (-1) ** (j + q) * comb(q, j) * c[j]
# now walk right-to-left
for q in range(0, nb):
c[-q - 1] = yb[q] / spec.poch(n - q, q) * (-1) ** q * (xb - xa) ** q
for j in range(0, q):
c[-q - 1] -= (-1) ** (j + 1) * comb(q, j + 1) * c[-q + j]
return c
@staticmethod
def _raise_degree(c, d):
"""Raise a degree of a polynomial in the Bernstein basis.
Given the coefficients of a polynomial degree `k`, return (the
coefficients of) the equivalent polynomial of degree `k+d`.
Parameters
----------
c : array_like
coefficient array, 1D
d : integer
Returns
-------
array
coefficient array, 1D array of length `c.shape[0] + d`
Notes
-----
This uses the fact that a Bernstein polynomial `b_{a, k}` can be
identically represented as a linear combination of polynomials of
a higher degree `k+d`:
.. math:: b_{a, k} = comb(k, a) \sum_{j=0}^{d} b_{a+j, k+d} \
comb(d, j) / comb(k+d, a+j)
"""
if d == 0:
return c
k = c.shape[0] - 1
out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype)
for a in range(c.shape[0]):
f = c[a] * comb(k, a)
for j in range(d + 1):
out[a + j] += f * comb(d, j) / comb(k + d, a + j)
return out
class NdPPoly(object):
"""
Piecewise tensor product polynomial
The value at point `xp = (x', y', z', ...)` is evaluated by first
computing the interval indices `i` such that::
x[0][i[0]] <= x' < x[0][i[0]+1]
x[1][i[1]] <= y' < x[1][i[1]+1]
...
and then computing::
S = sum(c[k0-m0-1,...,kn-mn-1,i[0],...,i[n]]
* (xp[0] - x[0][i[0]])**m0
* ...
* (xp[n] - x[n][i[n]])**mn
for m0 in range(k[0]+1)
...
for mn in range(k[n]+1))
where ``k[j]`` is the degree of the polynomial in dimension j. This
representation is the piecewise multivariate power basis.
Parameters
----------
c : ndarray, shape (k0, ..., kn, m0, ..., mn, ...)
Polynomial coefficients, with polynomial order `kj` and
`mj+1` intervals for each dimension `j`.
x : ndim-tuple of ndarrays, shapes (mj+1,)
Polynomial breakpoints for each dimension. These must be
sorted in increasing order.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
Attributes
----------
x : tuple of ndarrays
Breakpoints.
c : ndarray
Coefficients of the polynomials.
Methods
-------
__call__
construct_fast
See also
--------
PPoly : piecewise polynomials in 1D
Notes
-----
High-order polynomials in the power basis can be numerically
unstable.
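Examples
--------
A minimal sketch: a piecewise-constant polynomial equal to 1 on a
2 x 3 grid of cells (coefficient array of shape (k0+1, k1+1, m0, m1);
the numbers are illustrative only).
>>> from scipy.interpolate import NdPPoly
>>> c = np.ones((1, 1, 2, 3))
>>> x = (np.array([0., 1., 2.]), np.array([0., 1., 2., 3.]))
>>> p = NdPPoly(c, x)
>>> float(p((0.5, 1.5)))
1.0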
"""
def __init__(self, c, x, extrapolate=None):
self.x = tuple(np.ascontiguousarray(v, dtype=np.float64) for v in x)
self.c = np.asarray(c)
if extrapolate is None:
extrapolate = True
self.extrapolate = bool(extrapolate)
ndim = len(self.x)
if any(v.ndim != 1 for v in self.x):
raise ValueError("x arrays must all be 1-dimensional")
if any(v.size < 2 for v in self.x):
raise ValueError("x arrays must all contain at least 2 points")
if c.ndim < 2 * ndim:
raise ValueError("c must have at least 2*len(x) dimensions")
if any(np.any(v[1:] - v[:-1] < 0) for v in self.x):
raise ValueError("x-coordinates are not in increasing order")
if any(a != b.size - 1 for a, b in zip(c.shape[ndim:2 * ndim], self.x)):
raise ValueError("x and c do not agree on the number of intervals")
dtype = self._get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dtype)
@classmethod
def construct_fast(cls, c, x, extrapolate=None):
"""
Construct the piecewise polynomial without making checks.
Takes the same parameters as the constructor. Input arguments
`c` and `x` must be arrays of the correct shape and type. The
`c` array can only be of dtypes float and complex, and `x`
array must have dtype float.
"""
self = object.__new__(cls)
self.c = c
self.x = x
if extrapolate is None:
extrapolate = True
self.extrapolate = extrapolate
return self
def _get_dtype(self, dtype):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.c.dtype, np.complexfloating):
return np.complex_
else:
return np.float_
def _ensure_c_contiguous(self):
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
if not isinstance(self.x, tuple):
self.x = tuple(self.x)
def __call__(self, x, nu=None, extrapolate=None):
"""
Evaluate the piecewise polynomial or its derivative
Parameters
----------
x : array-like
Points to evaluate the interpolant at.
nu : tuple, optional
Orders of derivatives to evaluate. Each must be non-negative.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
y : array-like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
ndim = len(self.x)
x = _ndim_coords_from_arrays(x)
x_shape = x.shape
x = np.ascontiguousarray(x.reshape(-1, x.shape[-1]), dtype=np.float_)
if nu is None:
nu = np.zeros((ndim,), dtype=np.intc)
else:
nu = np.asarray(nu, dtype=np.intc)
if nu.ndim != 1 or nu.shape[0] != ndim:
raise ValueError("invalid number of derivative orders nu")
dim1 = prod(self.c.shape[:ndim])
dim2 = prod(self.c.shape[ndim:2 * ndim])
dim3 = prod(self.c.shape[2 * ndim:])
ks = np.array(self.c.shape[:ndim], dtype=np.intc)
out = np.empty((x.shape[0], dim3), dtype=self.c.dtype)
self._ensure_c_contiguous()
_ppoly.evaluate_nd(self.c.reshape(dim1, dim2, dim3),
self.x,
ks,
x,
nu,
bool(extrapolate),
out)
return out.reshape(x_shape[:-1] + self.c.shape[2 * ndim:])
def _derivative_inplace(self, nu, axis):
"""
Compute 1D derivative along a selected dimension in-place
May result in a non-contiguous c array.
"""
if nu < 0:
return self._antiderivative_inplace(-nu, axis)
ndim = len(self.x)
axis = axis % ndim
# reduce order
if nu == 0:
# noop
return
else:
sl = [slice(None)] * ndim
sl[axis] = slice(None, -nu, None)
c2 = self.c[sl]
if c2.shape[axis] == 0:
# derivative of order 0 is zero
shp = list(c2.shape)
shp[axis] = 1
c2 = np.zeros(shp, dtype=c2.dtype)
# multiply by the correct rising factorials
factor = spec.poch(np.arange(c2.shape[axis], 0, -1), nu)
sl = [None] * c2.ndim
sl[axis] = slice(None)
c2 *= factor[sl]
self.c = c2
def _antiderivative_inplace(self, nu, axis):
"""
Compute 1D antiderivative along a selected dimension
May result in a non-contiguous c array.
"""
if nu <= 0:
return self._derivative_inplace(-nu, axis)
ndim = len(self.x)
axis = axis % ndim
perm = list(range(ndim))
perm[0], perm[axis] = perm[axis], perm[0]
perm = perm + list(range(ndim, self.c.ndim))
c = self.c.transpose(perm)
c2 = np.zeros((c.shape[0] + nu,) + c.shape[1:],
dtype=c.dtype)
c2[:-nu] = c
# divide by the correct rising factorials
factor = spec.poch(np.arange(c.shape[0], 0, -1), nu)
c2[:-nu] /= factor[(slice(None),) + (None,) * (c.ndim - 1)]
# fix continuity of added degrees of freedom
perm2 = list(range(c2.ndim))
perm2[1], perm2[ndim + axis] = perm2[ndim + axis], perm2[1]
c2 = c2.transpose(perm2)
c2 = c2.copy()
_ppoly.fix_continuity(c2.reshape(c2.shape[0], c2.shape[1], -1),
self.x[axis], nu - 1)
c2 = c2.transpose(perm2)
c2 = c2.transpose(perm)
# Done
self.c = c2
def derivative(self, nu):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : ndim-tuple of int
Order of derivatives to evaluate for each dimension.
If negative, the antiderivative is returned.
Returns
-------
pp : NdPPoly
Piecewise polynomial of orders (k[0] - nu[0], ..., k[n] - nu[n])
representing the derivative of this polynomial.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals in each dimension are
considered half-open, ``[a, b)``, except for the last interval
which is closed ``[a, b]``.
"""
p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
for axis, n in enumerate(nu):
p._derivative_inplace(n, axis)
p._ensure_c_contiguous()
return p
def antiderivative(self, nu):
"""
Construct a new piecewise polynomial representing the antiderivative.
Antiderivative is also the indefinite integral of the function,
and derivative is its inverse operation.
Parameters
----------
nu : ndim-tuple of int
Order of derivatives to evaluate for each dimension.
If negative, the derivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k + n representing
the antiderivative of this polynomial.
Notes
-----
The antiderivative returned by this function is continuous and
continuously differentiable to order n-1, up to floating point
rounding error.
"""
p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
for axis, n in enumerate(nu):
p._antiderivative_inplace(n, axis)
p._ensure_c_contiguous()
return p
def integrate_1d(self, a, b, axis, extrapolate=None):
r"""
Compute NdPPoly representation for one dimensional definite integral
The result is a piecewise polynomial representing the integral:
.. math::
p(y, z, ...) = \int_a^b dx\, p(x, y, z, ...)
where the dimension integrated over is specified with the
`axis` parameter.
Parameters
----------
a, b : float
Lower and upper bound for integration.
axis : int
Dimension over which to compute the 1D integrals
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
ig : NdPPoly or array-like
Definite integral of the piecewise polynomial over [a, b].
If the polynomial was 1-dimensional, an array is returned,
otherwise, an NdPPoly object.
"""
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
ndim = len(self.x)
axis = int(axis) % ndim
# reuse 1D integration routines
c = self.c
swap = list(range(c.ndim))
swap.insert(0, swap[axis])
del swap[axis + 1]
swap.insert(1, swap[ndim + axis])
del swap[ndim + axis + 1]
c = c.transpose(swap)
p = PPoly.construct_fast(c.reshape(c.shape[0], c.shape[1], -1),
self.x[axis],
extrapolate=extrapolate)
out = p.integrate(a, b, extrapolate=extrapolate)
# Construct result
if ndim == 1:
return out.reshape(c.shape[2:])
else:
c = out.reshape(c.shape[2:])
x = self.x[:axis] + self.x[axis + 1:]
return self.construct_fast(c, x, extrapolate=extrapolate)
def integrate(self, ranges, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
ranges : ndim-tuple of 2-tuples float
Sequence of lower and upper bounds for each dimension,
``[(a[0], b[0]), ..., (a[ndim-1], b[ndim-1])]``
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
ig : array_like
Definite integral of the piecewise polynomial over
[a[0], b[0]] x ... x [a[ndim-1], b[ndim-1]]
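Examples
--------
A minimal sketch (the constant 1 integrated over ``[0, 2] x [0, 3]``;
the grid is illustrative only):
>>> from scipy.interpolate import NdPPoly
>>> c = np.ones((1, 1, 2, 3))
>>> x = (np.array([0., 1., 2.]), np.array([0., 1., 2., 3.]))
>>> p = NdPPoly(c, x)
>>> float(p.integrate(((0, 2), (0, 3))))
6.0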
"""
ndim = len(self.x)
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
if not hasattr(ranges, '__len__') or len(ranges) != ndim:
raise ValueError("Range not a sequence of correct length")
self._ensure_c_contiguous()
# Reuse 1D integration routine
c = self.c
for n, (a, b) in enumerate(ranges):
swap = list(range(c.ndim))
swap.insert(1, swap[ndim - n])
del swap[ndim - n + 1]
c = c.transpose(swap)
p = PPoly.construct_fast(c, self.x[n], extrapolate=extrapolate)
out = p.integrate(a, b, extrapolate=extrapolate)
c = out.reshape(c.shape[2:])
return c
class RegularGridInterpolator(object):
"""
Interpolation on a regular grid in arbitrary dimensions
The data must be defined on a regular grid; the grid spacing however may be
uneven. Linear and nearest-neighbour interpolation are supported. After
setting up the interpolator object, the interpolation method (*linear* or
*nearest*) may be chosen at each evaluation.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest". This parameter will become the default for the object's
``__call__`` method. Default is "linear".
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated.
Methods
-------
__call__
Notes
-----
Contrary to LinearNDInterpolator and NearestNDInterpolator, this class
avoids expensive triangulation of the input data by taking advantage of the
regular grid structure.
.. versionadded:: 0.14
Examples
--------
Evaluate a simple example function on the points of a 3D grid:
>>> from scipy.interpolate import RegularGridInterpolator
>>> def f(x, y, z):
... return 2 * x**3 + 3 * y**2 - z
>>> x = np.linspace(1, 4, 11)
>>> y = np.linspace(4, 7, 22)
>>> z = np.linspace(7, 9, 33)
>>> data = f(*np.meshgrid(x, y, z, indexing='ij', sparse=True))
``data`` is now a 3D array with ``data[i,j,k] = f(x[i], y[j], z[k])``.
Next, define an interpolating function from this data:
>>> my_interpolating_function = RegularGridInterpolator((x, y, z), data)
Evaluate the interpolating function at the two points
``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:
>>> pts = np.array([[2.1, 6.2, 8.3], [3.3, 5.2, 7.1]])
>>> my_interpolating_function(pts)
array([ 125.80469388, 146.30069388])
which is indeed a close approximation to
``[f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)]``.
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
References
----------
.. [1] Python package *regulargrid* by Johannes Buchner, see
https://pypi.python.org/pypi/regulargrid/
.. [2] Trilinear interpolation. (2013, January 17). In Wikipedia, The Free
Encyclopedia. Retrieved 27 Feb 2013 01:28.
http://en.wikipedia.org/w/index.php?title=Trilinear_interpolation&oldid=533448871
.. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear
and multilinear table interpolation in many dimensions." MATH.
COMPUT. 50.181 (1988): 189-196.
http://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf
"""
# this class is based on code originally programmed by Johannes Buchner,
# see https://github.com/JohannesBuchner/regulargrid
def __init__(self, points, values, method="linear", bounds_error=True,
fill_value=np.nan):
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
self.method = method
self.bounds_error = bounds_error
if not hasattr(values, 'ndim'):
# allow reasonable duck-typed values
values = np.asarray(values)
if len(points) > values.ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), values.ndim))
if hasattr(values, 'dtype') and hasattr(values, 'astype'):
if not np.issubdtype(values.dtype, np.inexact):
values = values.astype(float)
self.fill_value = fill_value
if fill_value is not None:
fill_value_dtype = np.asarray(fill_value).dtype
if (hasattr(values, 'dtype') and not
np.can_cast(fill_value_dtype, values.dtype,
casting='same_kind')):
raise ValueError("fill_value must be either 'None' or "
"of a type compatible with values")
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
self.grid = tuple([np.asarray(p) for p in points])
self.values = values
def __call__(self, xi, method=None):
"""
Interpolation at coordinates
Parameters
----------
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str
The method of interpolation to perform. Supported are "linear" and
"nearest".
"""
method = self.method if method is None else method
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
ndim = len(self.grid)
xi = _ndim_coords_from_arrays(xi, ndim=ndim)
if xi.shape[-1] != len(self.grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], ndim))
xi_shape = xi.shape
xi = xi.reshape(-1, xi_shape[-1])
if self.bounds_error:
for i, p in enumerate(xi.T):
if not np.logical_and(np.all(self.grid[i][0] <= p),
np.all(p <= self.grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
indices, norm_distances, out_of_bounds = self._find_indices(xi.T)
if method == "linear":
result = self._evaluate_linear(indices,
norm_distances,
out_of_bounds)
elif method == "nearest":
result = self._evaluate_nearest(indices,
norm_distances,
out_of_bounds)
if not self.bounds_error and self.fill_value is not None:
result[out_of_bounds] = self.fill_value
return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])
def _evaluate_linear(self, indices, norm_distances, out_of_bounds):
# slice for broadcasting over trailing dimensions in self.values
vslice = (slice(None),) + (None,) * (self.values.ndim - len(indices))
# find relevant values
        # each i and i+1 represents an edge
edges = itertools.product(*[[i, i + 1] for i in indices])
values = 0.
for edge_indices in edges:
weight = 1.
for ei, i, yi in zip(edge_indices, indices, norm_distances):
weight *= np.where(ei == i, 1 - yi, yi)
values += np.asarray(self.values[edge_indices]) * weight[vslice]
return values
def _evaluate_nearest(self, indices, norm_distances, out_of_bounds):
idx_res = []
for i, yi in zip(indices, norm_distances):
idx_res.append(np.where(yi <= .5, i, i + 1))
return self.values[idx_res]
def _find_indices(self, xi):
# find relevant edges between which xi are situated
indices = []
# compute distance to lower edge in unity units
norm_distances = []
# check for out of bounds xi
out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
# iterate through dimensions
for x, grid in zip(xi, self.grid):
i = np.searchsorted(grid, x) - 1
i[i < 0] = 0
i[i > grid.size - 2] = grid.size - 2
indices.append(i)
norm_distances.append((x - grid[i]) /
(grid[i + 1] - grid[i]))
if not self.bounds_error:
out_of_bounds += x < grid[0]
out_of_bounds += x > grid[-1]
return indices, norm_distances, out_of_bounds
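# A minimal usage sketch for the class above: build a RegularGridInterpolator
# on a small 3-D grid and evaluate it at two scattered points, mirroring the
# example referred to in the class docstring. Assumes only `np` from the
# module-level imports; kept as a private helper so nothing runs at import
# time.
def _regular_grid_usage_sketch():
    def f(x, y, z):
        return 2 * x**3 + 3 * y**2 - z
    x = np.linspace(1, 4, 11)
    y = np.linspace(4, 7, 22)
    z = np.linspace(7, 9, 33)
    data = f(*np.meshgrid(x, y, z, indexing='ij', sparse=True))
    interp = RegularGridInterpolator((x, y, z), data)
    pts = np.array([[2.1, 6.2, 8.3], [3.3, 5.2, 7.1]])
    # close to [f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)]
    return interp(pts)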
def interpn(points, values, xi, method="linear", bounds_error=True,
fill_value=np.nan):
"""
Multidimensional interpolation on regular grids.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str, optional
        The method of interpolation to perform. Supported are "linear",
        "nearest", and "splinef2d". "splinef2d" is only supported for
2-dimensional data.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated. Extrapolation is not supported by method
"splinef2d".
Returns
-------
values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
Interpolated values at input coordinates.
Notes
-----
.. versionadded:: 0.14
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
RegularGridInterpolator : Linear and nearest-neighbor Interpolation on a
regular grid in arbitrary dimensions
RectBivariateSpline : Bivariate spline approximation over a rectangular mesh
"""
# sanity check 'method' kwarg
if method not in ["linear", "nearest", "splinef2d"]:
raise ValueError("interpn only understands the methods 'linear', "
"'nearest', and 'splinef2d'. You provided %s." %
method)
if not hasattr(values, 'ndim'):
values = np.asarray(values)
ndim = values.ndim
if ndim > 2 and method == "splinef2d":
raise ValueError("The method spline2fd can only be used for "
"2-dimensional input data")
if not bounds_error and fill_value is None and method == "splinef2d":
raise ValueError("The method spline2fd does not support extrapolation.")
# sanity check consistency of input dimensions
if len(points) > ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), ndim))
if len(points) != ndim and method == 'splinef2d':
raise ValueError("The method spline2fd can only be used for "
"scalar data with one point per coordinate")
# sanity check input grid
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
grid = tuple([np.asarray(p) for p in points])
# sanity check requested xi
xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
if xi.shape[-1] != len(grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], len(grid)))
for i, p in enumerate(xi.T):
if bounds_error and not np.logical_and(np.all(grid[i][0] <= p),
np.all(p <= grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
# perform interpolation
if method == "linear":
interp = RegularGridInterpolator(points, values, method="linear",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "nearest":
interp = RegularGridInterpolator(points, values, method="nearest",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "splinef2d":
xi_shape = xi.shape
xi = xi.reshape(-1, xi.shape[-1])
# RectBivariateSpline doesn't support fill_value; we need to wrap here
idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
axis=0)
result = np.empty_like(xi[:, 0])
# make a copy of values for RectBivariateSpline
interp = RectBivariateSpline(points[0], points[1], values[:])
result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
result[np.logical_not(idx_valid)] = fill_value
return result.reshape(xi_shape[:-1])
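# A minimal usage sketch for interpn: interpolate f(x, y) = x + 2*y on a
# regular 2-D grid with both the "linear" and "nearest" methods. Assumes only
# `np` from the module-level imports; kept as a private helper so nothing
# runs at import time.
def _interpn_usage_sketch():
    x = np.linspace(0, 4, 5)
    y = np.linspace(0, 3, 4)
    values = x[:, None] + 2.0 * y[None, :]
    xi = np.array([[1.5, 2.5], [3.2, 0.1]])
    linear = interpn((x, y), values, xi)                     # exact for a bilinear f
    nearest = interpn((x, y), values, xi, method="nearest")  # snaps to grid points
    return linear, nearest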
# backward compatibility wrapper
class ppform(PPoly):
"""
Deprecated piecewise polynomial class.
New code should use the `PPoly` class instead.
"""
def __init__(self, coeffs, breaks, fill=0.0, sort=False):
warnings.warn("ppform is deprecated -- use PPoly instead",
category=DeprecationWarning)
if sort:
breaks = np.sort(breaks)
else:
breaks = np.asarray(breaks)
PPoly.__init__(self, coeffs, breaks)
self.coeffs = self.c
self.breaks = self.x
self.K = self.coeffs.shape[0]
self.fill = fill
self.a = self.breaks[0]
self.b = self.breaks[-1]
def __call__(self, x):
return PPoly.__call__(self, x, 0, False)
def _evaluate(self, x, nu, extrapolate, out):
PPoly._evaluate(self, x, nu, extrapolate, out)
out[~((x >= self.a) & (x <= self.b))] = self.fill
return out
@classmethod
def fromspline(cls, xk, cvals, order, fill=0.0):
# Note: this spline representation is incompatible with FITPACK
N = len(xk) - 1
sivals = np.empty((order + 1, N), dtype=float)
for m in xrange(order, -1, -1):
fact = spec.gamma(m + 1)
res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)
res /= fact
sivals[order - m, :] = res
return cls(sivals, xk, fill=fill)
# The 3 private functions below can be called by splmake().
def _dot0(a, b):
"""Similar to numpy.dot, but sum over last axis of a and 1st axis of b"""
if b.ndim <= 2:
return dot(a, b)
else:
axes = list(range(b.ndim))
axes.insert(-1, 0)
axes.pop(0)
return dot(a, b.transpose(axes))
def _find_smoothest(xk, yk, order, conds=None, B=None):
# construct Bmatrix, and Jmatrix
# e = J*c
# minimize norm(e,2) given B*c=yk
# if desired B can be given
# conds is ignored
N = len(xk) - 1
K = order
if B is None:
B = _fitpack._bsplmat(order, xk)
J = _fitpack._bspldismat(order, xk)
u, s, vh = scipy.linalg.svd(B)
ind = K - 1
V2 = vh[-ind:, :].T
V1 = vh[:-ind, :].T
A = dot(J.T, J)
tmp = dot(V2.T, A)
Q = dot(tmp, V2)
p = scipy.linalg.solve(Q, tmp)
tmp = dot(V2, p)
tmp = np.eye(N + K) - tmp
tmp = dot(tmp, V1)
tmp = dot(tmp, np.diag(1.0 / s))
tmp = dot(tmp, u.T)
return _dot0(tmp, yk)
# conds is a tuple of an array and a vector
# giving the left-hand and the right-hand side
# of the additional equations to add to B
def _find_user(xk, yk, order, conds, B):
lh = conds[0]
rh = conds[1]
B = np.concatenate((B, lh), axis=0)
w = np.concatenate((yk, rh), axis=0)
M, N = B.shape
if (M > N):
raise ValueError("over-specification of conditions")
elif (M < N):
return _find_smoothest(xk, yk, order, None, B)
else:
return scipy.linalg.solve(B, w)
# Remove the 3 private functions above as well when removing splmake
@np.deprecate(message="splmake is deprecated in scipy 0.19.0, "
"use make_interp_spline instead.")
def splmake(xk, yk, order=3, kind='smoothest', conds=None):
"""
Return a representation of a spline given data-points at internal knots
Parameters
----------
xk : array_like
The input array of x values of rank 1
yk : array_like
The input array of y values of rank N. `yk` can be an N-d array to
represent more than one curve, through the same `xk` points. The first
dimension is assumed to be the interpolating dimension and is the same
length of `xk`.
order : int, optional
Order of the spline
kind : str, optional
Can be 'smoothest', 'not_a_knot', 'fixed', 'clamped', 'natural',
'periodic', 'symmetric', 'user', 'mixed' and it is ignored if order < 2
conds : optional
        Extra conditions passed to the fitting routine. For ``kind='user'``
        this is an ``(lhs, rhs)`` pair of additional linear equations added
        to the constraint matrix; ``kind='smoothest'`` ignores it.
Returns
-------
splmake : tuple
Return a (`xk`, `cvals`, `k`) representation of a spline given
data-points where the (internal) knots are at the data-points.
"""
yk = np.asanyarray(yk)
order = int(order)
if order < 0:
raise ValueError("order must not be negative")
if order == 0:
return xk, yk[:-1], order
elif order == 1:
return xk, yk, order
try:
func = eval('_find_%s' % kind)
    except NameError:
raise NotImplementedError
# the constraint matrix
B = _fitpack._bsplmat(order, xk)
coefs = func(xk, yk, order, conds, B)
return xk, coefs, order
@np.deprecate(message="spleval is deprecated in scipy 0.19.0, "
"use BSpline instead.")
def spleval(xck, xnew, deriv=0):
"""
Evaluate a fixed spline represented by the given tuple at the new x-values
The `xj` values are the interior knot points. The approximation
region is `xj[0]` to `xj[-1]`. If N+1 is the length of `xj`, then `cvals`
should have length N+k where `k` is the order of the spline.
Parameters
----------
(xj, cvals, k) : tuple
Parameters that define the fixed spline
xj : array_like
Interior knot points
cvals : array_like
        B-spline coefficients
k : int
Order of the spline
xnew : array_like
Locations to calculate spline
deriv : int
        Order of the derivative of the spline to evaluate (default is 0).
Returns
-------
spleval : ndarray
If `cvals` represents more than one curve (`cvals.ndim` > 1) and/or
`xnew` is N-d, then the result is `xnew.shape` + `cvals.shape[1:]`
providing the interpolation of multiple curves.
Notes
-----
Internally, an additional `k`-1 knot points are added on either side of
the spline.
"""
(xj, cvals, k) = xck
oldshape = np.shape(xnew)
xx = np.ravel(xnew)
sh = cvals.shape[1:]
res = np.empty(xx.shape + sh, dtype=cvals.dtype)
for index in np.ndindex(*sh):
sl = (slice(None),) + index
if issubclass(cvals.dtype.type, np.complexfloating):
res[sl].real = _fitpack._bspleval(xx, xj, cvals.real[sl], k, deriv)
res[sl].imag = _fitpack._bspleval(xx, xj, cvals.imag[sl], k, deriv)
else:
res[sl] = _fitpack._bspleval(xx, xj, cvals[sl], k, deriv)
res.shape = oldshape + sh
return res
@np.deprecate(message="spltopp is deprecated in scipy 0.19.0, "
"use PPoly.from_spline instead.")
def spltopp(xk, cvals, k):
"""Return a piece-wise polynomial object from a fixed-spline tuple."""
return ppform.fromspline(xk, cvals, k)
@np.deprecate(message="spline is deprecated in scipy 0.19.0, "
"use Bspline class instead.")
def spline(xk, yk, xnew, order=3, kind='smoothest', conds=None):
"""
Interpolate a curve at new points using a spline fit
Parameters
----------
xk, yk : array_like
The x and y values that define the curve.
xnew : array_like
The x values where spline should estimate the y values.
order : int
Default is 3.
kind : string
One of {'smoothest'}
    conds : optional
        Extra conditions passed through to `splmake`.
Returns
-------
spline : ndarray
An array of y values; the spline evaluated at the positions `xnew`.
"""
return spleval(splmake(xk, yk, order=order, kind=kind, conds=conds), xnew)
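# A minimal sketch of the deprecated splmake/spleval/spline pipeline on toy
# data; calling it emits DeprecationWarnings, and new code should use
# make_interp_spline / BSpline instead. Assumes `np` and the functions above.
def _deprecated_spline_sketch():
    xk = np.linspace(0, 2 * np.pi, 10)
    yk = np.sin(xk)
    xnew = np.linspace(0, 2 * np.pi, 50)
    rep = splmake(xk, yk, order=3)       # (knots, coefficients, order) tuple
    y1 = spleval(rep, xnew)              # evaluate the fitted spline
    y2 = spline(xk, yk, xnew, order=3)   # one-shot convenience wrapper
    return y1, y2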
| mit |
linebp/pandas | asv_bench/benchmarks/index_object.py | 4 | 5469 | from .pandas_vb_common import *
class SetOperations(object):
goal_time = 0.2
def setup(self):
self.rng = date_range('1/1/2000', periods=10000, freq='T')
self.rng2 = self.rng[:(-1)]
# object index with datetime values
if (self.rng.dtype == object):
self.idx_rng = self.rng.view(Index)
else:
self.idx_rng = self.rng.asobject
self.idx_rng2 = self.idx_rng[:(-1)]
# other datetime
N = 100000
A = N - 20000
B = N + 20000
self.dtidx1 = DatetimeIndex(range(N))
self.dtidx2 = DatetimeIndex(range(A, B))
self.dtidx3 = DatetimeIndex(range(N, B))
# integer
self.N = 1000000
self.options = np.arange(self.N)
self.left = Index(
self.options.take(np.random.permutation(self.N)[:(self.N // 2)]))
self.right = Index(
self.options.take(np.random.permutation(self.N)[:(self.N // 2)]))
# strings
N = 10000
strs = tm.rands_array(10, N)
self.leftstr = Index(strs[:N * 2 // 3])
self.rightstr = Index(strs[N // 3:])
def time_datetime_intersection(self):
self.rng.intersection(self.rng2)
def time_datetime_union(self):
self.rng.union(self.rng2)
def time_datetime_difference(self):
self.dtidx1.difference(self.dtidx2)
def time_datetime_difference_disjoint(self):
self.dtidx1.difference(self.dtidx3)
def time_datetime_symmetric_difference(self):
self.dtidx1.symmetric_difference(self.dtidx2)
def time_index_datetime_intersection(self):
self.idx_rng.intersection(self.idx_rng2)
def time_index_datetime_union(self):
self.idx_rng.union(self.idx_rng2)
def time_int64_intersection(self):
self.left.intersection(self.right)
def time_int64_union(self):
self.left.union(self.right)
def time_int64_difference(self):
self.left.difference(self.right)
def time_int64_symmetric_difference(self):
self.left.symmetric_difference(self.right)
def time_str_difference(self):
self.leftstr.difference(self.rightstr)
def time_str_symmetric_difference(self):
self.leftstr.symmetric_difference(self.rightstr)
class Datetime(object):
goal_time = 0.2
def setup(self):
self.dr = pd.date_range('20000101', freq='D', periods=10000)
def time_is_dates_only(self):
self.dr._is_dates_only
class Float64(object):
goal_time = 0.2
def setup(self):
self.idx = tm.makeFloatIndex(1000000)
self.mask = ((np.arange(self.idx.size) % 3) == 0)
self.series_mask = Series(self.mask)
self.baseidx = np.arange(1000000.0)
def time_boolean_indexer(self):
self.idx[self.mask]
def time_boolean_series_indexer(self):
self.idx[self.series_mask]
def time_construct(self):
Index(self.baseidx)
def time_div(self):
(self.idx / 2)
def time_get(self):
self.idx[1]
def time_mul(self):
(self.idx * 2)
def time_slice_indexer_basic(self):
self.idx[:(-1)]
def time_slice_indexer_even(self):
self.idx[::2]
class StringIndex(object):
goal_time = 0.2
def setup(self):
self.idx = tm.makeStringIndex(1000000)
self.mask = ((np.arange(1000000) % 3) == 0)
self.series_mask = Series(self.mask)
def time_boolean_indexer(self):
self.idx[self.mask]
def time_boolean_series_indexer(self):
self.idx[self.series_mask]
def time_slice_indexer_basic(self):
self.idx[:(-1)]
def time_slice_indexer_even(self):
self.idx[::2]
class Multi1(object):
goal_time = 0.2
def setup(self):
(n, k) = (200, 5000)
self.levels = [np.arange(n), tm.makeStringIndex(n).values, (1000 + np.arange(n))]
self.labels = [np.random.choice(n, (k * n)) for lev in self.levels]
self.mi = MultiIndex(levels=self.levels, labels=self.labels)
self.iterables = [tm.makeStringIndex(10000), range(20)]
def time_duplicated(self):
self.mi.duplicated()
def time_from_product(self):
MultiIndex.from_product(self.iterables)
class Multi2(object):
goal_time = 0.2
def setup(self):
self.n = ((((3 * 5) * 7) * 11) * (1 << 10))
(low, high) = (((-1) << 12), (1 << 12))
self.f = (lambda k: np.repeat(np.random.randint(low, high, (self.n // k)), k))
self.i = np.random.permutation(self.n)
self.mi = MultiIndex.from_arrays([self.f(11), self.f(7), self.f(5), self.f(3), self.f(1)])[self.i]
self.a = np.repeat(np.arange(100), 1000)
self.b = np.tile(np.arange(1000), 100)
self.midx2 = MultiIndex.from_arrays([self.a, self.b])
self.midx2 = self.midx2.take(np.random.permutation(np.arange(100000)))
def time_sortlevel_int64(self):
self.mi.sortlevel()
def time_sortlevel_zero(self):
self.midx2.sortlevel(0)
def time_sortlevel_one(self):
self.midx2.sortlevel(1)
class Multi3(object):
goal_time = 0.2
def setup(self):
self.level1 = range(1000)
self.level2 = date_range(start='1/1/2012', periods=100)
self.mi = MultiIndex.from_product([self.level1, self.level2])
def time_datetime_level_values_full(self):
self.mi.copy().values
def time_datetime_level_values_sliced(self):
self.mi[:10].values
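# A quick, hand-run sanity check mirroring what asv does: instantiate a
# benchmark class, call setup() once, then invoke a few time_* methods.
# Assumes pandas_vb_common provides the usual asv namespace (np, tm,
# date_range, Index, ...); the guard keeps asv's own import unaffected.
if __name__ == '__main__':
    bench = SetOperations()
    bench.setup()
    bench.time_datetime_union()
    bench.time_int64_intersection()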
| bsd-3-clause |
reimandlab/Visualistion-Framework-for-Genome-Mutations | website/tests/test_imports/ptm_sites/test_site_import.py | 1 | 6041 | from timeit import Timer
from types import SimpleNamespace as RawSite
from functools import partial
from pandas import DataFrame
from pytest import warns
from database import db, create_key_model_dict
from database_testing import DatabaseTest
from imports.sites.site_importer import SiteImporter
from imports.sites.site_mapper import find_all, find_all_regex
from imports.sites.site_mapper import SiteMapper
from models import Protein, Gene
def test_find_all():
background = 'Lorem ipsum dololor L'
cases = {
'L': [0, 20],
'o': [1, 13, 15, 17],
'olo': [13, 15],
'^L': [0],
'^Lorem': [0],
'L$': [20],
' L$': [19],
'not matching': []
}
for query, expected_result in cases.items():
assert find_all(background, query) == expected_result
for case in cases:
regexp_time = Timer(partial(find_all_regex, background, case)).timeit()
custom_time = Timer(partial(find_all, background, case)).timeit()
assert find_all_regex(background, case) == find_all(background, case)
assert custom_time < regexp_time
        print(
            f'find_all() was faster than find_all_regex(), taking only '
            f'{custom_time / regexp_time * 100:.1f}% of its running time')
def create_importer(*args, offset=7, **kwargs):
class MinimalSiteImporter(SiteImporter):
source_name = 'DummySource'
site_offset = offset
def load_sites(self, *args, **kwargs):
return []
site_types = []
return MinimalSiteImporter(*args, **kwargs)
def group_by_isoform(sites: DataFrame):
return {site.refseq: site for site in sites.itertuples(index=False)}
class TestImport(DatabaseTest):
def test_extract_sequence_and_offset(self):
protein = Protein(refseq='NM_0001', sequence='MSSSGTPDLPVLLTDLKIQYTKIFINNEWHDSVSGK')
db.session.add(protein)
importer = create_importer(offset=7)
cases = [
[RawSite(position=2, refseq='NM_0001', residue='S'), '^MSSSGTPDL', 1],
[RawSite(position=10, refseq='NM_0001', residue='P'), 'SSGTPDLPVLLTDLK', 7]
]
for site, sequence, offset in cases:
assert importer.extract_site_surrounding_sequence(site) == sequence
assert importer.determine_left_offset(site) == offset
def test_map_site_to_isoform(self):
mapper = SiteMapper([], lambda s: f'{s.position}{s.sequence}')
site = RawSite(sequence='FIN', position=6, left_sequence_offset=1)
protein = Protein(sequence='LKIQYTKIFINNEWHDSVSG')
assert mapper.map_site_to_isoform(site, protein) == [10]
with warns(UserWarning, match='More than one match for: 2KI'):
site = RawSite(sequence='KI', position=2, left_sequence_offset=0)
assert mapper.map_site_to_isoform(site, protein) == [2, 7]
def test_mapping(self):
gene_a = Gene(name='A', isoforms=[
# the full isoform of gene A
Protein(refseq='NM_01', sequence='AAAAAAAAAXAA'),
# a trimmed isoform of gene A
Protein(refseq='NM_02', sequence='AAAXAA'),
])
gene_b = Gene(name='B', isoforms=[
Protein(refseq='NM_03', sequence='BBBBBBBBBYBB'),
Protein(refseq='NM_04', sequence='BBBYBB'),
])
db.session.add_all([gene_a, gene_b])
        # whoops, NM_03 has been accidentally removed (!)
db.session.delete(Protein.query.filter_by(refseq='NM_03').one())
db.session.commit()
mapper = SiteMapper(
create_key_model_dict(Protein, 'refseq'),
lambda s: f'{s.position}{s.residue}'
)
sites = DataFrame.from_dict(data={
'good site A': ('A', 'NM_01', 10, 'AXA', 'X', 1),
'lost isoform': ('B', 'NM_03', 10, 'BYB', 'Y', 1)
}, orient='index')
sites.columns = [
'gene', 'refseq', 'position', 'sequence', 'residue', 'left_sequence_offset'
]
mapped_sites = mapper.map_sites_by_sequence(sites)
sites_by_isoform = group_by_isoform(mapped_sites)
# one from NM_01 (defined), from NM_02 (mapped), from NM_04 (mapped)
assert len(mapped_sites) == 3
assert set(sites_by_isoform) == {'NM_01', 'NM_02', 'NM_04'}
assert sites_by_isoform['NM_01'].residue == sites_by_isoform['NM_02'].residue == 'X'
assert sites_by_isoform['NM_01'].position == 10
assert sites_by_isoform['NM_02'].position == 4
assert sites_by_isoform['NM_04'].residue == 'Y'
assert sites_by_isoform['NM_04'].position == 4
# will the mapping to NM_02 still work if we remove 'gene' column?
sites.drop(columns=['gene'], inplace=True)
mapped_sites = mapper.map_sites_by_sequence(sites)
sites_by_isoform = group_by_isoform(mapped_sites)
assert len(mapped_sites) == 2
assert set(sites_by_isoform) == {'NM_01', 'NM_02'}
def test_edge_cases_mapping(self):
gene_t = Gene(name='T', isoforms=[
# 123456789
Protein(refseq='NM_01', sequence='AXAXAYAYA'),
# C-terminal part was trimmed
Protein(refseq='NM_02', sequence='AXAXA'),
# N-terminal part was trimmed
Protein(refseq='NM_03', sequence='AYAYA'),
])
db.session.add(gene_t)
db.session.commit()
mapper = SiteMapper(
create_key_model_dict(Protein, 'refseq'),
lambda s: f'{s.position}{s.residue}'
)
        # all sites are defined in NM_01; the idea is to test mapping of sites at the sequence edges
sites = DataFrame.from_dict(data={
'site at N-terminus edge': ('T', 'NM_01', 1, '^AX', 'A', 2),
'site at C-terminus edge': ('T', 'NM_01', 9, 'YA$', 'A', 2),
}, orient='index')
sites.columns = [
'gene', 'refseq', 'position', 'sequence', 'residue', 'left_sequence_offset'
]
mapped_sites = mapper.map_sites_by_sequence(sites)
assert len(mapped_sites) == 4
| lgpl-2.1 |
fengzhyuan/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependecy_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| bsd-3-clause |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/matplotlib/backends/backend_tkagg.py | 2 | 39111 | # Todd Miller [email protected]
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import tkinter as Tk
from six.moves import tkinter_filedialog as FileDialog
import os, sys, math
import os.path
# Paint image to Tk photo blitter extension
import matplotlib.backends.tkagg as tkagg
from matplotlib.backends.backend_agg import FigureCanvasAgg
import matplotlib.backends.windowing as windowing
import matplotlib
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, GraphicsContextBase,
NavigationToolbar2, RendererBase, StatusbarBase, TimerBase,
ToolContainerBase, cursors)
from matplotlib.backend_managers import ToolManager
from matplotlib import backend_tools
from matplotlib._pylab_helpers import Gcf
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
import matplotlib.cbook as cbook
rcParams = matplotlib.rcParams
verbose = matplotlib.verbose
backend_version = Tk.TkVersion
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 75
cursord = {
cursors.MOVE: "fleur",
cursors.HAND: "hand2",
cursors.POINTER: "arrow",
cursors.SELECT_REGION: "tcross",
cursors.WAIT: "watch",
}
def raise_msg_to_str(msg):
"""msg is a return arg from a raise. Join with new lines"""
if not isinstance(msg, six.string_types):
msg = '\n'.join(map(str, msg))
return msg
def error_msg_tkpaint(msg, parent=None):
from six.moves import tkinter_messagebox as tkMessageBox
tkMessageBox.showerror("matplotlib", msg)
class TimerTk(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses Tk's timer events.
Attributes
----------
interval : int
The time between timer events in milliseconds. Default is 1000 ms.
single_shot : bool
Boolean flag indicating whether this timer should operate as single
shot (run once and then stop). Defaults to False.
callbacks : list
Stores list of (func, args) tuples that will be called upon timer
events. This list can be manipulated directly, or the functions
`add_callback` and `remove_callback` can be used.
'''
def __init__(self, parent, *args, **kwargs):
TimerBase.__init__(self, *args, **kwargs)
self.parent = parent
self._timer = None
def _timer_start(self):
self._timer_stop()
self._timer = self.parent.after(self._interval, self._on_timer)
def _timer_stop(self):
if self._timer is not None:
self.parent.after_cancel(self._timer)
self._timer = None
def _on_timer(self):
TimerBase._on_timer(self)
# Tk after() is only a single shot, so we need to add code here to
# reset the timer if we're not operating in single shot mode. However,
# if _timer is None, this means that _timer_stop has been called; so
# don't recreate the timer in that case.
if not self._single and self._timer:
self._timer = self.parent.after(self._interval, self._on_timer)
else:
self._timer = None
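# A minimal sketch of how a TimerTk is normally obtained: through
# FigureCanvasTkAgg.new_timer() (defined below) rather than constructed
# directly. Assumes the TkAgg backend is active and pyplot is importable;
# kept as a private helper so nothing runs at import time.
def _timer_usage_sketch():
    import matplotlib.pyplot as plt
    fig = plt.figure()
    timer = fig.canvas.new_timer(interval=500)   # a TimerTk bound to the Tk loop
    timer.add_callback(lambda: print('tick'))    # fires every 500 ms
    timer.start()
    plt.show()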
class FigureCanvasTkAgg(FigureCanvasAgg):
keyvald = {65507 : 'control',
65505 : 'shift',
65513 : 'alt',
65515 : 'super',
65508 : 'control',
65506 : 'shift',
65514 : 'alt',
65361 : 'left',
65362 : 'up',
65363 : 'right',
65364 : 'down',
65307 : 'escape',
65470 : 'f1',
65471 : 'f2',
65472 : 'f3',
65473 : 'f4',
65474 : 'f5',
65475 : 'f6',
65476 : 'f7',
65477 : 'f8',
65478 : 'f9',
65479 : 'f10',
65480 : 'f11',
65481 : 'f12',
65300 : 'scroll_lock',
65299 : 'break',
65288 : 'backspace',
65293 : 'enter',
65379 : 'insert',
65535 : 'delete',
65360 : 'home',
65367 : 'end',
65365 : 'pageup',
65366 : 'pagedown',
65438 : '0',
65436 : '1',
65433 : '2',
65435 : '3',
65430 : '4',
65437 : '5',
65432 : '6',
65429 : '7',
65431 : '8',
65434 : '9',
65451 : '+',
65453 : '-',
65450 : '*',
65455 : '/',
65439 : 'dec',
65421 : 'enter',
}
_keycode_lookup = {
262145: 'control',
524320: 'alt',
524352: 'alt',
1048584: 'super',
1048592: 'super',
131074: 'shift',
131076: 'shift',
}
"""_keycode_lookup is used for badly mapped (i.e. no event.key_sym set)
keys on apple keyboards."""
def __init__(self, figure, master=None, resize_callback=None):
FigureCanvasAgg.__init__(self, figure)
self._idle = True
self._idle_callback = None
t1,t2,w,h = self.figure.bbox.bounds
w, h = int(w), int(h)
self._tkcanvas = Tk.Canvas(
master=master, width=w, height=h, borderwidth=0,
highlightthickness=0)
self._tkphoto = Tk.PhotoImage(
master=self._tkcanvas, width=w, height=h)
self._tkcanvas.create_image(w//2, h//2, image=self._tkphoto)
self._resize_callback = resize_callback
self._tkcanvas.bind("<Configure>", self.resize)
self._tkcanvas.bind("<Key>", self.key_press)
self._tkcanvas.bind("<Motion>", self.motion_notify_event)
self._tkcanvas.bind("<KeyRelease>", self.key_release)
for name in "<Button-1>", "<Button-2>", "<Button-3>":
self._tkcanvas.bind(name, self.button_press_event)
for name in "<Double-Button-1>", "<Double-Button-2>", "<Double-Button-3>":
self._tkcanvas.bind(name, self.button_dblclick_event)
for name in "<ButtonRelease-1>", "<ButtonRelease-2>", "<ButtonRelease-3>":
self._tkcanvas.bind(name, self.button_release_event)
# Mouse wheel on Linux generates button 4/5 events
for name in "<Button-4>", "<Button-5>":
self._tkcanvas.bind(name, self.scroll_event)
# Mouse wheel for windows goes to the window with the focus.
# Since the canvas won't usually have the focus, bind the
# event to the window containing the canvas instead.
# See http://wiki.tcl.tk/3893 (mousewheel) for details
root = self._tkcanvas.winfo_toplevel()
root.bind("<MouseWheel>", self.scroll_event_windows, "+")
# Can't get destroy events by binding to _tkcanvas. Therefore, bind
# to the window and filter.
def filter_destroy(evt):
if evt.widget is self._tkcanvas:
self.close_event()
root.bind("<Destroy>", filter_destroy, "+")
self._master = master
self._tkcanvas.focus_set()
def resize(self, event):
width, height = event.width, event.height
if self._resize_callback is not None:
self._resize_callback(event)
# compute desired figure size in inches
dpival = self.figure.dpi
winch = width/dpival
hinch = height/dpival
self.figure.set_size_inches(winch, hinch, forward=False)
self._tkcanvas.delete(self._tkphoto)
self._tkphoto = Tk.PhotoImage(
master=self._tkcanvas, width=int(width), height=int(height))
self._tkcanvas.create_image(int(width/2),int(height/2),image=self._tkphoto)
self.resize_event()
self.show()
# a resizing will in general move the pointer position
# relative to the canvas, so process it as a motion notify
# event. An intended side effect of this call is to allow
# window raises (which trigger a resize) to get the cursor
# position to the mpl event framework so key presses which are
# over the axes will work w/o clicks or explicit motion
self._update_pointer_position(event)
def _update_pointer_position(self, guiEvent=None):
"""
Figure out if we are inside the canvas or not and update the
canvas enter/leave events
"""
        # if the pointer is over the canvas, set the lastx and lasty
# attrs of the canvas so it can process event w/o mouse click
# or move
# the window's upper, left coords in screen coords
xw = self._tkcanvas.winfo_rootx()
yw = self._tkcanvas.winfo_rooty()
# the pointer's location in screen coords
xp, yp = self._tkcanvas.winfo_pointerxy()
        # now figure out the canvas coordinates of the pointer
xc = xp - xw
yc = yp - yw
# flip top/bottom
yc = self.figure.bbox.height - yc
# JDH: this method was written originally to get the pointer
# location to the backend lastx and lasty attrs so that events
# like KeyEvent can be handled without mouse events. e.g., if
# the cursor is already above the axes, then key presses like
# 'g' should toggle the grid. In order for this to work in
# backend_bases, the canvas needs to know _lastx and _lasty.
# There are three ways to get this info the canvas:
#
# 1) set it explicitly
#
# 2) call enter/leave events explicitly. The downside of this
# in the impl below is that enter could be repeatedly
        # triggered if the mouse is over the axes and one is
# resizing with the keyboard. This is not entirely bad,
# because the mouse position relative to the canvas is
# changing, but it may be surprising to get repeated entries
# without leaves
#
# 3) process it as a motion notify event. This also has pros
# and cons. The mouse is moving relative to the window, but
        # this may surprise an event handler writer who is getting
# motion_notify_events even if the mouse has not moved
# here are the three scenarios
if 1:
# just manually set it
self._lastx, self._lasty = xc, yc
elif 0:
# alternate implementation: process it as a motion
FigureCanvasBase.motion_notify_event(self, xc, yc, guiEvent)
elif 0:
# alternate implementation -- process enter/leave events
# instead of motion/notify
if self.figure.bbox.contains(xc, yc):
self.enter_notify_event(guiEvent, xy=(xc,yc))
else:
self.leave_notify_event(guiEvent)
def draw(self):
FigureCanvasAgg.draw(self)
tkagg.blit(self._tkphoto, self.renderer._renderer, colormode=2)
self._master.update_idletasks()
def blit(self, bbox=None):
tkagg.blit(self._tkphoto, self.renderer._renderer, bbox=bbox, colormode=2)
self._master.update_idletasks()
show = draw
def draw_idle(self):
'update drawing area only if idle'
if self._idle is False:
return
self._idle = False
def idle_draw(*args):
try:
self.draw()
finally:
self._idle = True
self._idle_callback = self._tkcanvas.after_idle(idle_draw)
def get_tk_widget(self):
"""returns the Tk widget used to implement FigureCanvasTkAgg.
Although the initial implementation uses a Tk canvas, this routine
is intended to hide that fact.
"""
return self._tkcanvas
def motion_notify_event(self, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
def button_press_event(self, event, dblclick=False):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if sys.platform=='darwin':
# 2 and 3 were reversed on the OSX platform I
# tested under tkagg
if num==2: num=3
elif num==3: num=2
FigureCanvasBase.button_press_event(self, x, y, num, dblclick=dblclick, guiEvent=event)
def button_dblclick_event(self,event):
self.button_press_event(event,dblclick=True)
def button_release_event(self, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if sys.platform=='darwin':
# 2 and 3 were reversed on the OSX platform I
# tested under tkagg
if num==2: num=3
elif num==3: num=2
FigureCanvasBase.button_release_event(self, x, y, num, guiEvent=event)
def scroll_event(self, event):
x = event.x
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if num==4: step = +1
elif num==5: step = -1
else: step = 0
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
def scroll_event_windows(self, event):
"""MouseWheel event processor"""
# need to find the window that contains the mouse
w = event.widget.winfo_containing(event.x_root, event.y_root)
if w == self._tkcanvas:
x = event.x_root - w.winfo_rootx()
y = event.y_root - w.winfo_rooty()
y = self.figure.bbox.height - y
step = event.delta/120.
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
def _get_key(self, event):
val = event.keysym_num
if val in self.keyvald:
key = self.keyvald[val]
elif val == 0 and sys.platform == 'darwin' and \
event.keycode in self._keycode_lookup:
key = self._keycode_lookup[event.keycode]
elif val < 256:
key = chr(val)
else:
key = None
# add modifier keys to the key string. Bit details originate from
# http://effbot.org/tkinterbook/tkinter-events-and-bindings.htm
# BIT_SHIFT = 0x001; BIT_CAPSLOCK = 0x002; BIT_CONTROL = 0x004;
# BIT_LEFT_ALT = 0x008; BIT_NUMLOCK = 0x010; BIT_RIGHT_ALT = 0x080;
# BIT_MB_1 = 0x100; BIT_MB_2 = 0x200; BIT_MB_3 = 0x400;
# In general, the modifier key is excluded from the modifier flag,
# however this is not the case on "darwin", so double check that
# we aren't adding repeat modifier flags to a modifier key.
if sys.platform == 'win32':
modifiers = [(17, 'alt', 'alt'),
(2, 'ctrl', 'control'),
]
elif sys.platform == 'darwin':
modifiers = [(3, 'super', 'super'),
(4, 'alt', 'alt'),
(2, 'ctrl', 'control'),
]
else:
modifiers = [(6, 'super', 'super'),
(3, 'alt', 'alt'),
(2, 'ctrl', 'control'),
]
if key is not None:
# note, shift is not added to the keys as this is already accounted for
for bitmask, prefix, key_name in modifiers:
if event.state & (1 << bitmask) and key_name not in key:
key = '{0}+{1}'.format(prefix, key)
return key
def key_press(self, event):
key = self._get_key(event)
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
def key_release(self, event):
key = self._get_key(event)
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of :class:`backend_bases.Timer`.
This is useful for getting periodic events through the backend's native
event loop. Implemented only for backends with GUIs.
Other Parameters
----------------
interval : scalar
Timer interval in milliseconds
callbacks : list
Sequence of (func, args, kwargs) where ``func(*args, **kwargs)``
will be executed by the timer every *interval*.
"""
return TimerTk(self._tkcanvas, *args, **kwargs)
def flush_events(self):
self._master.update()
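# A minimal embedding sketch: the pattern FigureCanvasTkAgg is built for is
# packing its Tk widget into an ordinary Tk window. Assumes only the Tk and
# Figure imports at the top of this module; kept as a private helper so
# nothing runs at import time.
def _embedding_sketch():
    root = Tk.Tk()
    fig = Figure(figsize=(4, 3))
    ax = fig.add_subplot(111)
    ax.plot([0, 1, 2], [0, 1, 4])
    canvas = FigureCanvasTkAgg(fig, master=root)
    canvas.show()  # alias of draw() in this backend
    canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
    root.mainloop()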
class FigureManagerTkAgg(FigureManagerBase):
"""
Attributes
----------
canvas : `FigureCanvas`
The FigureCanvas instance
num : int or str
The Figure number
toolbar : tk.Toolbar
The tk.Toolbar
window : tk.Window
The tk.Window
"""
def __init__(self, canvas, num, window):
FigureManagerBase.__init__(self, canvas, num)
self.window = window
self.window.withdraw()
self.set_window_title("Figure %d" % num)
self.canvas = canvas
self.canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
self._num = num
self.toolmanager = self._get_toolmanager()
self.toolbar = self._get_toolbar()
self.statusbar = None
if self.toolmanager:
backend_tools.add_tools_to_manager(self.toolmanager)
if self.toolbar:
backend_tools.add_tools_to_container(self.toolbar)
self.statusbar = StatusbarTk(self.window, self.toolmanager)
self._shown = False
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolmanager is not None:
pass
elif self.toolbar is not None:
self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
def _get_toolbar(self):
if matplotlib.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2TkAgg(self.canvas, self.window)
elif matplotlib.rcParams['toolbar'] == 'toolmanager':
toolbar = ToolbarTk(self.toolmanager, self.window)
else:
toolbar = None
return toolbar
def _get_toolmanager(self):
if rcParams['toolbar'] == 'toolmanager':
toolmanager = ToolManager(self.canvas.figure)
else:
toolmanager = None
return toolmanager
def resize(self, width, height=None):
# before 09-12-22, the resize method takes a single *event*
# parameter. On the other hand, the resize method of other
# FigureManager class takes *width* and *height* parameter,
        # which is used to change the size of the window. For
        # Figure.set_size_inches with forward=True to work with the Tk
        # backend, I changed the function signature but tried to keep
        # it backward compatible. -JJL
        # when a single parameter is given, consider it as an event
if height is None:
width = width.width
else:
self.canvas._tkcanvas.master.geometry("%dx%d" % (width, height))
if self.toolbar is not None:
self.toolbar.configure(width=width)
def show(self):
"""
this function doesn't segfault but causes the
PyEval_RestoreThread: NULL state bug on win32
"""
_focus = windowing.FocusManager()
if not self._shown:
def destroy(*args):
self.window = None
Gcf.destroy(self._num)
self.canvas._tkcanvas.bind("<Destroy>", destroy)
self.window.deiconify()
# anim.py requires this
self.window.update()
else:
self.canvas.draw_idle()
# Raise the new window.
self.canvas.manager.window.attributes('-topmost', 1)
self.canvas.manager.window.attributes('-topmost', 0)
self._shown = True
def destroy(self, *args):
if self.window is not None:
#self.toolbar.destroy()
if self.canvas._idle_callback:
self.canvas._tkcanvas.after_cancel(self.canvas._idle_callback)
self.window.destroy()
if Gcf.get_num_fig_managers()==0:
if self.window is not None:
self.window.quit()
self.window = None
def get_window_title(self):
return self.window.wm_title()
def set_window_title(self, title):
self.window.wm_title(title)
def full_screen_toggle(self):
is_fullscreen = bool(self.window.attributes('-fullscreen'))
self.window.attributes('-fullscreen', not is_fullscreen)
class AxisMenu(object):
def __init__(self, master, naxes):
self._master = master
self._naxes = naxes
self._mbar = Tk.Frame(master=master, relief=Tk.RAISED, borderwidth=2)
self._mbar.pack(side=Tk.LEFT)
self._mbutton = Tk.Menubutton(
master=self._mbar, text="Axes", underline=0)
self._mbutton.pack(side=Tk.LEFT, padx="2m")
self._mbutton.menu = Tk.Menu(self._mbutton)
self._mbutton.menu.add_command(
label="Select All", command=self.select_all)
self._mbutton.menu.add_command(
label="Invert All", command=self.invert_all)
self._axis_var = []
self._checkbutton = []
for i in range(naxes):
self._axis_var.append(Tk.IntVar())
self._axis_var[i].set(1)
self._checkbutton.append(self._mbutton.menu.add_checkbutton(
label = "Axis %d" % (i+1),
variable=self._axis_var[i],
command=self.set_active))
self._mbutton.menu.invoke(self._mbutton.menu.index("Select All"))
self._mbutton['menu'] = self._mbutton.menu
self._mbar.tk_menuBar(self._mbutton)
self.set_active()
def adjust(self, naxes):
if self._naxes < naxes:
for i in range(self._naxes, naxes):
self._axis_var.append(Tk.IntVar())
self._axis_var[i].set(1)
self._checkbutton.append( self._mbutton.menu.add_checkbutton(
label = "Axis %d" % (i+1),
variable=self._axis_var[i],
command=self.set_active))
elif self._naxes > naxes:
for i in range(self._naxes-1, naxes-1, -1):
del self._axis_var[i]
self._mbutton.menu.forget(self._checkbutton[i])
del self._checkbutton[i]
self._naxes = naxes
self.set_active()
def get_indices(self):
a = [i for i in range(len(self._axis_var)) if self._axis_var[i].get()]
return a
def set_active(self):
self._master.set_active(self.get_indices())
def invert_all(self):
for a in self._axis_var:
a.set(not a.get())
self.set_active()
def select_all(self):
for a in self._axis_var:
a.set(1)
self.set_active()
class NavigationToolbar2TkAgg(NavigationToolbar2, Tk.Frame):
"""
Attributes
----------
canvas : `FigureCanvas`
the figure canvas on which to operate
win : tk.Window
the tk.Window which owns this toolbar
"""
def __init__(self, canvas, window):
self.canvas = canvas
self.window = window
self._idle = True
NavigationToolbar2.__init__(self, canvas)
def destroy(self, *args):
del self.message
Tk.Frame.destroy(self, *args)
def set_message(self, s):
self.message.set(s)
def draw_rubberband(self, event, x0, y0, x1, y1):
height = self.canvas.figure.bbox.height
y0 = height - y0
y1 = height - y1
if hasattr(self, "lastrect"):
self.canvas._tkcanvas.delete(self.lastrect)
self.lastrect = self.canvas._tkcanvas.create_rectangle(x0, y0, x1, y1)
#self.canvas.draw()
def release(self, event):
try: self.lastrect
except AttributeError: pass
else:
self.canvas._tkcanvas.delete(self.lastrect)
del self.lastrect
def set_cursor(self, cursor):
self.window.configure(cursor=cursord[cursor])
self.window.update_idletasks()
def _Button(self, text, file, command, extension='.gif'):
img_file = os.path.join(
rcParams['datapath'], 'images', file + extension)
im = Tk.PhotoImage(master=self, file=img_file)
b = Tk.Button(
master=self, text=text, padx=2, pady=2, image=im, command=command)
b._ntimage = im
b.pack(side=Tk.LEFT)
return b
def _Spacer(self):
# Buttons are 30px high, so make this 26px tall with padding to center it
s = Tk.Frame(
master=self, height=26, relief=Tk.RIDGE, pady=2, bg="DarkGray")
s.pack(side=Tk.LEFT, padx=5)
return s
def _init_toolbar(self):
xmin, xmax = self.canvas.figure.bbox.intervalx
height, width = 50, xmax-xmin
Tk.Frame.__init__(self, master=self.window,
width=int(width), height=int(height),
borderwidth=2)
self.update() # Make axes menu
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
# Add a spacer; return value is unused.
self._Spacer()
else:
button = self._Button(text=text, file=image_file,
command=getattr(self, callback))
if tooltip_text is not None:
ToolTip.createToolTip(button, tooltip_text)
self.message = Tk.StringVar(master=self)
self._message_label = Tk.Label(master=self, textvariable=self.message)
self._message_label.pack(side=Tk.RIGHT)
self.pack(side=Tk.BOTTOM, fill=Tk.X)
def configure_subplots(self):
toolfig = Figure(figsize=(6,3))
window = Tk.Tk()
canvas = FigureCanvasTkAgg(toolfig, master=window)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
def save_figure(self, *args):
from six.moves import tkinter_tkfiledialog, tkinter_messagebox
filetypes = self.canvas.get_supported_filetypes().copy()
default_filetype = self.canvas.get_default_filetype()
# Tk doesn't provide a way to choose a default filetype,
# so we just have to put it first
default_filetype_name = filetypes.pop(default_filetype)
sorted_filetypes = ([(default_filetype, default_filetype_name)]
+ sorted(six.iteritems(filetypes)))
tk_filetypes = [(name, '*.%s' % ext) for ext, name in sorted_filetypes]
# adding a default extension seems to break the
# asksaveasfilename dialog when you choose various save types
# from the dropdown. Passing in the empty string seems to
# work - JDH!
#defaultextension = self.canvas.get_default_filetype()
defaultextension = ''
initialdir = os.path.expanduser(rcParams['savefig.directory'])
initialfile = self.canvas.get_default_filename()
fname = tkinter_tkfiledialog.asksaveasfilename(
master=self.window,
title='Save the figure',
filetypes=tk_filetypes,
defaultextension=defaultextension,
initialdir=initialdir,
initialfile=initialfile,
)
if fname in ["", ()]:
return
# Save dir for next time, unless empty str (i.e., use cwd).
if initialdir != "":
rcParams['savefig.directory'] = (
os.path.dirname(six.text_type(fname)))
try:
# This method will handle the delegation to the correct type
self.canvas.figure.savefig(fname)
except Exception as e:
tkinter_messagebox.showerror("Error saving file", str(e))
def set_active(self, ind):
self._ind = ind
self._active = [self._axes[i] for i in self._ind]
def update(self):
_focus = windowing.FocusManager()
self._axes = self.canvas.figure.axes
NavigationToolbar2.update(self)
class ToolTip(object):
"""
Tooltip recipe from
http://www.voidspace.org.uk/python/weblog/arch_d7_2006_07_01.shtml#e387
"""
@staticmethod
def createToolTip(widget, text):
toolTip = ToolTip(widget)
def enter(event):
toolTip.showtip(text)
def leave(event):
toolTip.hidetip()
widget.bind('<Enter>', enter)
widget.bind('<Leave>', leave)
def __init__(self, widget):
self.widget = widget
self.tipwindow = None
self.id = None
self.x = self.y = 0
def showtip(self, text):
"Display text in tooltip window"
self.text = text
if self.tipwindow or not self.text:
return
x, y, _, _ = self.widget.bbox("insert")
x = x + self.widget.winfo_rootx() + 27
y = y + self.widget.winfo_rooty()
self.tipwindow = tw = Tk.Toplevel(self.widget)
tw.wm_overrideredirect(1)
tw.wm_geometry("+%d+%d" % (x, y))
try:
# For Mac OS
tw.tk.call("::tk::unsupported::MacWindowStyle",
"style", tw._w,
"help", "noActivates")
except Tk.TclError:
pass
label = Tk.Label(tw, text=self.text, justify=Tk.LEFT,
background="#ffffe0", relief=Tk.SOLID, borderwidth=1)
label.pack(ipadx=1)
def hidetip(self):
tw = self.tipwindow
self.tipwindow = None
if tw:
tw.destroy()
class RubberbandTk(backend_tools.RubberbandBase):
def __init__(self, *args, **kwargs):
backend_tools.RubberbandBase.__init__(self, *args, **kwargs)
def draw_rubberband(self, x0, y0, x1, y1):
height = self.figure.canvas.figure.bbox.height
y0 = height - y0
y1 = height - y1
if hasattr(self, "lastrect"):
self.figure.canvas._tkcanvas.delete(self.lastrect)
self.lastrect = self.figure.canvas._tkcanvas.create_rectangle(
x0, y0, x1, y1)
def remove_rubberband(self):
if hasattr(self, "lastrect"):
self.figure.canvas._tkcanvas.delete(self.lastrect)
del self.lastrect
class SetCursorTk(backend_tools.SetCursorBase):
def set_cursor(self, cursor):
self.figure.canvas.manager.window.configure(cursor=cursord[cursor])
class ToolbarTk(ToolContainerBase, Tk.Frame):
def __init__(self, toolmanager, window):
ToolContainerBase.__init__(self, toolmanager)
xmin, xmax = self.toolmanager.canvas.figure.bbox.intervalx
height, width = 50, xmax - xmin
Tk.Frame.__init__(self, master=window,
width=int(width), height=int(height),
borderwidth=2)
self._toolitems = {}
self.pack(side=Tk.TOP, fill=Tk.X)
self._groups = {}
def add_toolitem(
self, name, group, position, image_file, description, toggle):
frame = self._get_groupframe(group)
button = self._Button(name, image_file, toggle, frame)
if description is not None:
ToolTip.createToolTip(button, description)
self._toolitems.setdefault(name, [])
self._toolitems[name].append(button)
def _get_groupframe(self, group):
if group not in self._groups:
if self._groups:
self._add_separator()
frame = Tk.Frame(master=self, borderwidth=0)
frame.pack(side=Tk.LEFT, fill=Tk.Y)
self._groups[group] = frame
return self._groups[group]
def _add_separator(self):
separator = Tk.Frame(master=self, bd=5, width=1, bg='black')
separator.pack(side=Tk.LEFT, fill=Tk.Y, padx=2)
def _Button(self, text, image_file, toggle, frame):
if image_file is not None:
im = Tk.PhotoImage(master=self, file=image_file)
else:
im = None
if not toggle:
b = Tk.Button(master=frame, text=text, padx=2, pady=2, image=im,
command=lambda: self._button_click(text))
else:
b = Tk.Checkbutton(master=frame, text=text, padx=2, pady=2,
image=im, indicatoron=False,
command=lambda: self._button_click(text))
b._ntimage = im
b.pack(side=Tk.LEFT)
return b
def _button_click(self, name):
self.trigger_tool(name)
def toggle_toolitem(self, name, toggled):
if name not in self._toolitems:
return
for toolitem in self._toolitems[name]:
if toggled:
toolitem.select()
else:
toolitem.deselect()
def remove_toolitem(self, name):
for toolitem in self._toolitems[name]:
toolitem.pack_forget()
del self._toolitems[name]
class StatusbarTk(StatusbarBase, Tk.Frame):
def __init__(self, window, *args, **kwargs):
StatusbarBase.__init__(self, *args, **kwargs)
xmin, xmax = self.toolmanager.canvas.figure.bbox.intervalx
height, width = 50, xmax - xmin
Tk.Frame.__init__(self, master=window,
width=int(width), height=int(height),
borderwidth=2)
self._message = Tk.StringVar(master=self)
self._message_label = Tk.Label(master=self, textvariable=self._message)
self._message_label.pack(side=Tk.RIGHT)
self.pack(side=Tk.TOP, fill=Tk.X)
def set_message(self, s):
self._message.set(s)
class SaveFigureTk(backend_tools.SaveFigureBase):
def trigger(self, *args):
from six.moves import tkinter_tkfiledialog, tkinter_messagebox
filetypes = self.figure.canvas.get_supported_filetypes().copy()
default_filetype = self.figure.canvas.get_default_filetype()
# Tk doesn't provide a way to choose a default filetype,
# so we just have to put it first
default_filetype_name = filetypes.pop(default_filetype)
sorted_filetypes = ([(default_filetype, default_filetype_name)]
+ sorted(six.iteritems(filetypes)))
tk_filetypes = [(name, '*.%s' % ext) for ext, name in sorted_filetypes]
# adding a default extension seems to break the
# asksaveasfilename dialog when you choose various save types
# from the dropdown. Passing in the empty string seems to
# work - JDH!
# defaultextension = self.figure.canvas.get_default_filetype()
defaultextension = ''
initialdir = os.path.expanduser(rcParams['savefig.directory'])
initialfile = self.figure.canvas.get_default_filename()
fname = tkinter_tkfiledialog.asksaveasfilename(
master=self.figure.canvas.manager.window,
title='Save the figure',
filetypes=tk_filetypes,
defaultextension=defaultextension,
initialdir=initialdir,
initialfile=initialfile,
)
if fname == "" or fname == ():
return
else:
if initialdir == '':
# explicitly missing key or empty str signals to use cwd
rcParams['savefig.directory'] = initialdir
else:
# save dir for next time
rcParams['savefig.directory'] = os.path.dirname(
six.text_type(fname))
try:
# This method will handle the delegation to the correct type
self.figure.savefig(fname)
except Exception as e:
tkinter_messagebox.showerror("Error saving file", str(e))
class ConfigureSubplotsTk(backend_tools.ConfigureSubplotsBase):
def __init__(self, *args, **kwargs):
backend_tools.ConfigureSubplotsBase.__init__(self, *args, **kwargs)
self.window = None
def trigger(self, *args):
self.init_window()
self.window.lift()
def init_window(self):
if self.window:
return
toolfig = Figure(figsize=(6, 3))
self.window = Tk.Tk()
canvas = FigureCanvasTkAgg(toolfig, master=self.window)
toolfig.subplots_adjust(top=0.9)
_tool = SubplotTool(self.figure, toolfig)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
self.window.protocol("WM_DELETE_WINDOW", self.destroy)
def destroy(self, *args, **kwargs):
self.window.destroy()
self.window = None
backend_tools.ToolSaveFigure = SaveFigureTk
backend_tools.ToolConfigureSubplots = ConfigureSubplotsTk
backend_tools.ToolSetCursor = SetCursorTk
backend_tools.ToolRubberband = RubberbandTk
Toolbar = ToolbarTk
@_Backend.export
class _BackendTkAgg(_Backend):
FigureCanvas = FigureCanvasTkAgg
FigureManager = FigureManagerTkAgg
@staticmethod
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
_focus = windowing.FocusManager()
window = Tk.Tk(className="matplotlib")
window.withdraw()
# Put a mpl icon on the window rather than the default tk icon.
# Tkinter doesn't allow colour icons on linux systems, but tk>=8.5 has
        # an iconphoto command which we call directly. Source:
# http://mail.python.org/pipermail/tkinter-discuss/2006-November/000954.html
icon_fname = os.path.join(
rcParams['datapath'], 'images', 'matplotlib.ppm')
icon_img = Tk.PhotoImage(file=icon_fname)
try:
            window.tk.call('wm', 'iconphoto', window._w, icon_img)
except Exception as exc:
# log the failure (due e.g. to Tk version), but carry on
verbose.report('Could not load matplotlib icon: %s' % exc)
canvas = FigureCanvasTkAgg(figure, master=window)
manager = FigureManagerTkAgg(canvas, num, window)
if matplotlib.is_interactive():
manager.show()
canvas.draw_idle()
return manager
@staticmethod
def trigger_manager_draw(manager):
manager.show()
@staticmethod
def mainloop():
Tk.mainloop()
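# Editorial note (not part of the original module): this backend is normally
# reached through the public matplotlib API rather than by using _BackendTkAgg
# directly, e.g. (sketch):
#     import matplotlib
#     matplotlib.use('TkAgg')    # select this backend before importing pyplot
#     import matplotlib.pyplot as plt
#     fig = plt.figure()         # routed to new_figure_manager_given_figure
#     plt.show()                 # ends up in Tk.mainloop() via mainloop()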
| mit |
AndreasMadsen/tensorflow | tensorflow/examples/learn/iris_run_config.py | 86 | 2087 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with run config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
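# NOTE (editorial): sklearn.cross_validation was deprecated in scikit-learn
# 0.18 and removed in 0.20; on current versions the equivalent import is
# `from sklearn.model_selection import train_test_split`.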
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
def main(unused_argv):
# Load dataset.
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
  # You can define your configurations by providing a RunConfig object to
  # the estimator to control session configurations, e.g. num_cores
  # and gpu_memory_fraction.
run_config = tf.contrib.learn.estimators.RunConfig(
num_cores=3, gpu_memory_fraction=0.6)
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
x_train)
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3,
config=run_config)
# Fit and predict.
classifier.fit(x_train, y_train, steps=200)
predictions = list(classifier.predict(x_test, as_iterable=True))
score = metrics.accuracy_score(y_test, predictions)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
DonBeo/statsmodels | statsmodels/datasets/copper/data.py | 28 | 2316 | """World Copper Prices 1951-1975 dataset."""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Used with express permission from the original author,
who retains all rights."""
TITLE = "World Copper Market 1951-1975 Dataset"
SOURCE = """
Jeff Gill's `Generalized Linear Models: A Unified Approach`
http://jgill.wustl.edu/research/books.html
"""
DESCRSHORT = """World Copper Market 1951-1975"""
DESCRLONG = """This data describes the world copper market from 1951 through 1975. In an
example, in Gill, the outcome variable (of a 2 stage estimation) is the world
consumption of copper for the 25 years. The explanatory variables are the
world consumption of copper in 1000 metric tons, the constant dollar adjusted
price of copper, the price of a substitute, aluminum, an index of real per
capita income base 1970, an annual measure of manufacturer inventory change,
and a time trend.
"""
NOTE = """
Number of Observations - 25
Number of Variables - 6
Variable name definitions::
WORLDCONSUMPTION - World consumption of copper (in 1000 metric tons)
COPPERPRICE - Constant dollar adjusted price of copper
INCOMEINDEX - An index of real per capita income (base 1970)
ALUMPRICE - The price of aluminum
INVENTORYINDEX - A measure of annual manufacturer inventory trend
TIME - A time trend
Years are included in the data file though not returned by load.
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the copper data and returns a Dataset class.
Returns
--------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray(data, endog_idx=0, dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath + '/copper.csv', 'rb'), delimiter=",",
names=True, dtype=float, usecols=(1,2,3,4,5,6))
return data
def load_pandas():
"""
Load the copper data and returns a Dataset class.
Returns
--------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray_pandas(data, endog_idx=0, dtype=float)
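# Editorial sketch (not part of the original module): typical consumer-side
# usage, assuming statsmodels' Dataset conventions (the endog/exog attributes
# produced by process_recarray_pandas).
if __name__ == '__main__':
    copper = load_pandas()
    print(copper.endog.head())  # WORLDCONSUMPTION (endog_idx=0)
    print(copper.exog.head())   # remaining explanatory columns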
| bsd-3-clause |
enoordeh/StatisticalMethods | examples/Cepheids/straightline_utils.py | 14 | 2858 | # numpy: numerical library
import numpy as np
# avoid broken installs by forcing Agg backend...
#import matplotlib
#matplotlib.use('Agg')
# pylab: matplotlib's matlab-like interface
import pylab as plt
# The data we will fit:
# x, y, sigma_y
data1 = np.array([[201,592,61],[244,401,25],[47,583,38],[287,402,15],[203,495,21],
[58,173,15],[210,479,27],[202,504,14],[198,510,30],[158,416,16],
[165,393,14],[201,442,25],[157,317,52],[131,311,16],[166,400,34],
[160,337,31],[186,423,42],[125,334,26],[218,533,16],[146,344,22]]).astype(float)
# plotting limits
xlimits = [0,250]
ylimits = [100,600]
title_prefix = 'Straight line'
plot_format = '.png'
mlimits = [1.9, 2.6]
blimits = [-20, 80]
mlo,mhi = mlimits
blo,bhi = blimits
slo,shi = [0.001,100]
def pdf_contour_levels(p):
    """Return the density levels of the gridded PDF `p` that enclose
    approximately 68% and 95% of the total probability mass."""
    sortp = np.sort(p.ravel())
    cump = sortp.cumsum()
    return [sortp[cump > cump.max() * f].min()
            for f in [0.32, 0.05]]
def plot_mcmc_results(chain):
# Pull m and b arrays out of the Markov chain.
mm = [m for b,m in chain]
bb = [b for b,m in chain]
# Scatterplot of m,b posterior samples
plt.clf()
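    # NOTE (editorial): bgrid, mgrid and posterior are not defined in this
    # module; they are assumed to be supplied at module/notebook level by the
    # caller before this function is used.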
plt.contour(bgrid, mgrid, posterior, pdf_contour_levels(posterior))
plt.plot(bb, mm, 'b.', alpha=0.1)
plot_mb_setup()
plt.show()
# Histograms
import triangle
triangle.corner(chain, labels=['b','m'], extents=[0.99]*2)
plt.show()
# Traces
plt.clf()
plt.subplot(2,1,1)
plt.plot(mm, 'k-')
plt.ylim(mlo,mhi)
plt.ylabel('m')
plt.subplot(2,1,2)
plt.plot(bb, 'k-')
plt.ylabel('b')
plt.ylim(blo,bhi)
plt.show()
def plot_mb_setup():
plt.xlabel('intercept b')
plt.ylabel('slope m')
plt.axis([blo,bhi, mlo,mhi])
def get_data_no_outliers():
# pull out the x, y, and sigma_y columns, which have been packed into the
# "data1" matrix. "data1" has shape (20,3). ":" means "everything in
# that dimension". Some of the first 5 points are outliers so for this
# part we only grab from index 5 on, with magic "5:"
x = data1[5:,0]
y = data1[5:,1]
sigmay = data1[5:,2]
return (x, y, sigmay)
def get_data_with_outliers():
x = data1[:,0]
y = data1[:,1]
sigmay = data1[:,2]
return x,y,sigmay
# Plot data with error bars, standard axis limits, etc.
def plot_yerr(x, y, sigmay):
# plot data with error bars
plt.errorbar(x, y, yerr=sigmay, fmt='.', ms=7, lw=1, color='k')
# if you put '$' in you can make Latex labels
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.xlim(*xlimits)
plt.ylim(*ylimits)
plt.title(title_prefix)
# Plot a y = mx + b line.
def plot_line(m, b, **kwargs):
x = np.array(xlimits)
y = b + m*x
p = plt.plot(x, y, 'k-', alpha=0.5, **kwargs)
plt.xlim(*xlimits)
plt.ylim(*ylimits)
return p
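# Editorial sketch (not part of the original module): a minimal example of how
# these helpers are typically combined; the slope/intercept guess below is
# illustrative only.
if __name__ == '__main__':
    x, y, sigmay = get_data_no_outliers()
    plot_yerr(x, y, sigmay)
    plot_line(2.2, 40.0, label='guess: m=2.2, b=40')
    plt.legend()
    plt.show()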
| gpl-2.0 |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/sklearn/setup.py | 69 | 3201 | import os
from os.path import join
import warnings
from sklearn._build_utils import maybe_cythonize_extensions
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
# submodules with build utilities
config.add_subpackage('__check_build')
config.add_subpackage('_build_utils')
# submodules which do not have their own setup.py
# we must manually add sub-submodules & tests
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('cross_decomposition/tests')
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('model_selection')
config.add_subpackage('model_selection/tests')
config.add_subpackage('neural_network')
config.add_subpackage('neural_network/tests')
config.add_subpackage('preprocessing')
config.add_subpackage('preprocessing/tests')
config.add_subpackage('semi_supervised')
config.add_subpackage('semi_supervised/tests')
# submodules which have their own setup.py
# leave out "linear_model" and "utils" for now; add them after cblas below
config.add_subpackage('cluster')
config.add_subpackage('datasets')
config.add_subpackage('decomposition')
config.add_subpackage('ensemble')
config.add_subpackage('externals')
config.add_subpackage('feature_extraction')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('metrics/cluster')
config.add_subpackage('neighbors')
config.add_subpackage('tree')
config.add_subpackage('svm')
# add cython extension module for isotonic regression
config.add_extension('_isotonic',
sources=['_isotonic.pyx'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
    # some libs need cblas; fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
# the following packages depend on cblas, so they have to be build
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
maybe_cythonize_extensions(top_path, config)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
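# Editorial note (assumption based on the standard numpy.distutils workflow):
# this subpackage setup is normally driven by scikit-learn's top-level
# setup.py, e.g. `python setup.py build_ext --inplace` from the repo root.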
| mit |