repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
aclairekeum/SoftwareSystems | hw02/wget-1.15/src/timing.py | 24 | 1661 | """Example code for Software Systems at Olin College.
Copyright 2014 Allen Downey
License: Creative Commons Attribution-ShareAlike 3.0
"""
import glob
import sys
import matplotlib.pyplot as pyplot
def read_file(filename):
"""Reads a timing file and returns a list of (t, s) pairs.
filename: string timing file generated by the instrumented version
of wget
t is a time in ms
s is the total number of bytes presented to the application layer
"""
res = []
# add a fake packet at the beginning so the connect time is visible
lasts = 1460
for line in open(filename):
t, s = [float(x) for x in line.split()]
res.append((t, lasts))
res.append((t, s))
lasts = s
return res
def make_graph(dirname):
"""Makes a graph of the timing charts in the given directory.
Graphs all files in the directory that match the pattern
timing.[0-9]*.[0-9]*
dirname: string
"""
pattern = '%s/timing.[0-9]*.[0-9]*' % dirname
filenames = glob.glob(pattern)
data = []
for filename in filenames:
pairs = read_file(filename)
data.append(pairs)
for pairs in data:
xs, ys = zip(*pairs)
pyplot.plot(xs, ys, alpha=0.4, linewidth=1)
pyplot.xlabel('time (ms)')
pyplot.ylabel('bytes received')
if dirname == '.':
filename = 'timing.png'
else:
filename = 'timing.%s.png' % dirname
print 'Writing', filename
pyplot.savefig(filename, format='png', dpi=150)
pyplot.show()
def main(script, dirname='.'):
make_graph(dirname)
if __name__ == '__main__':
main(*sys.argv)
| gpl-3.0 |
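A minimal usage sketch for the timing script above, added for illustration. It assumes the module is saved locally as `timing.py`, that the instrumented wget has written `timing.<pid>.<port>` files into the working directory (the exact file name below is hypothetical), and a Python 2 interpreter, since the script uses `print` statements.

```python
# Command-line use, from the directory holding the timing files:
#   python timing.py            # graphs timing.* files in the current directory
#   python timing.py somedir    # graphs timing.* files under somedir/
#
# Programmatic use (same Python 2 interpreter):
import timing                                  # local copy of the module above

pairs = timing.read_file('timing.1234.80')     # hypothetical timing file
print pairs[:3]                                # [(t_ms, total_bytes), ...] step pairs
timing.make_graph('.')                         # writes timing.png and shows the plot
```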
meduz/scikit-learn | sklearn/setup.py | 69 | 3201 | import os
from os.path import join
import warnings
from sklearn._build_utils import maybe_cythonize_extensions
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
# submodules with build utilities
config.add_subpackage('__check_build')
config.add_subpackage('_build_utils')
# submodules which do not have their own setup.py
# we must manually add sub-submodules & tests
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('cross_decomposition/tests')
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('model_selection')
config.add_subpackage('model_selection/tests')
config.add_subpackage('neural_network')
config.add_subpackage('neural_network/tests')
config.add_subpackage('preprocessing')
config.add_subpackage('preprocessing/tests')
config.add_subpackage('semi_supervised')
config.add_subpackage('semi_supervised/tests')
# submodules which have their own setup.py
# leave out "linear_model" and "utils" for now; add them after cblas below
config.add_subpackage('cluster')
config.add_subpackage('datasets')
config.add_subpackage('decomposition')
config.add_subpackage('ensemble')
config.add_subpackage('externals')
config.add_subpackage('feature_extraction')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('metrics/cluster')
config.add_subpackage('neighbors')
config.add_subpackage('tree')
config.add_subpackage('svm')
# add cython extension module for isotonic regression
config.add_extension('_isotonic',
sources=['_isotonic.pyx'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
# some libs need cblas; a fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
# the following packages depend on cblas, so they have to be built
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
maybe_cythonize_extensions(top_path, config)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
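A hedged sketch of how a `numpy.distutils` configuration like the one above is typically driven, added for illustration. The in-place build command was the conventional one for this era of scikit-learn; the toy `Configuration` below is built with no subpackages so it runs anywhere `numpy.distutils` is still available (NumPy < 1.26, Python < 3.12).

```python
# Conventional in-place build from the repository root for this layout:
#   python setup.py build_ext --inplace
#
# Toy illustration of the Configuration object that configuration() populates:
from numpy.distutils.misc_util import Configuration

config = Configuration('sklearn', parent_package='', top_path=None)
info = config.todict()        # the dict ultimately passed to numpy.distutils.core.setup
print(sorted(info.keys()))    # e.g. ['name', 'package_dir', 'packages', ...]
```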
nphilipp/aubio | python/demos/demo_waveform_plot.py | 10 | 2099 | #! /usr/bin/env python
import sys
from aubio import pvoc, source
from numpy import zeros, hstack
def get_waveform_plot(filename, samplerate = 0, block_size = 4096, ax = None, downsample = 2**4):
import matplotlib.pyplot as plt
if not ax:
fig = plt.figure()
ax = fig.add_subplot(111)
hop_s = block_size
allsamples_max = zeros(0,)
downsample = downsample # to plot n samples / hop_s
a = source(filename, samplerate, hop_s) # source file
if samplerate == 0: samplerate = a.samplerate
total_frames = 0
while True:
samples, read = a()
# keep some data to plot it later
new_maxes = (abs(samples.reshape(hop_s/downsample, downsample))).max(axis=0)
allsamples_max = hstack([allsamples_max, new_maxes])
total_frames += read
if read < hop_s: break
allsamples_max = (allsamples_max > 0) * allsamples_max
allsamples_max_times = [ ( float (t) / downsample ) * hop_s for t in range(len(allsamples_max)) ]
ax.plot(allsamples_max_times, allsamples_max, '-b')
ax.plot(allsamples_max_times, -allsamples_max, '-b')
ax.axis(xmin = allsamples_max_times[0], xmax = allsamples_max_times[-1])
set_xlabels_sample2time(ax, allsamples_max_times[-1], samplerate)
return ax
def set_xlabels_sample2time(ax, latest_sample, samplerate):
ax.axis(xmin = 0, xmax = latest_sample)
if latest_sample / float(samplerate) > 60:
ax.set_xlabel('time (mm:ss)')
ax.set_xticklabels([ "%02d:%02d" % (t/float(samplerate)/60, (t/float(samplerate))%60) for t in ax.get_xticks()[:-1]], rotation = 50)
else:
ax.set_xlabel('time (ss.mm)')
ax.set_xticklabels([ "%02d.%02d" % (t/float(samplerate), 100*((t/float(samplerate))%1) ) for t in ax.get_xticks()[:-1]], rotation = 50)
if __name__ == '__main__':
import matplotlib.pyplot as plt
if len(sys.argv) < 2:
print "Usage: %s <filename>" % sys.argv[0]
else:
for soundfile in sys.argv[1:]:
get_waveform_plot(soundfile)
# display graph
plt.show()
| gpl-3.0 |
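A short hedged sketch of calling the waveform helper above from another script, added for illustration. The sound file name is hypothetical, and a Python 2 interpreter plus aubio, numpy and matplotlib are assumed, matching the demo's `print` statement and integer division.

```python
import matplotlib.pyplot as plt
from demo_waveform_plot import get_waveform_plot   # the module shown above

ax = get_waveform_plot('loop.wav', samplerate=0,    # hypothetical audio file
                       block_size=4096, downsample=2**4)
ax.set_title('loop.wav')
plt.show()
```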
jpinsonault/android_sensor_logger | python_scripts/OrientationClusterer.py | 1 | 4258 | import numpy as np
from sklearn import mixture
import matplotlib.pyplot as plt
from LogEntry import LogEntry
from datetime import datetime
from matplotlib.dates import DayLocator, HourLocator, DateFormatter, drange
from numpy import arange
from os.path import isfile
import json
def cache_dict(filename):
"""
For functions that retrieve data in a json-writable format
Checks if filename exists, if so gets the data from the file.
If not, gets the data normally then caches it in the file
"""
def wrapper(function):
def wrapped(*args):
data = None
if isfile(filename):
with open(filename, 'r') as in_file:
data = json.load(in_file)
else:
data = function(*args)
with open(filename, 'w') as out_file:
out_file.write(json.dumps(data))
return data
return wrapped
return wrapper
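# Hedged usage sketch for the cache_dict decorator above (added for illustration;
# the function and file names are hypothetical and not part of the original module):
#
#     @cache_dict("weather_cache.json")
#     def fetch_weather(city):
#         return {"city": city, "temp_c": 21}    # imagine a slow API call here
#
#     fetch_weather("Portland")   # first call computes and writes weather_cache.json
#     fetch_weather("Portland")   # later calls re-read the cached JSON instead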
NEAR = 0.0
FAR = 5.0
# (x,y,z,proximity)
REFERENCE_DATA = {
"flat-screen-down": [(-0.15322891, -0.15322891, -9.80665, NEAR)],
"flat-screen-up": [(0.15322891, -0.15322891, 9.959879, FAR)],
"in-hand-portrait": [
(-0.30645782, 5.8226986, 7.8146744, FAR),
(0.61291564, 3.064578, 9.346964, FAR),
(0.7661445, 3.064578, 9.346964, FAR),
(0.61291564, 3.217807, 9.346964, FAR),
],
"in-hand-landscape": [
(-5.363012, 0.61291564, 8.580819, FAR),
(-5.209783, 0.7661445, 8.42759, FAR),
(-5.209783, 0.45968673, 8.734048, FAR),
],
"standing-pocket-upside-down": [
(2.2984335, -8.274361, -0.91937345, NEAR),
(-2.7581203, -9.653421, -0.15322891, NEAR),
(0.91937345, -9.80665, 0.15322891, NEAR)
],
"standing-pocket-rightside-up": [
(-3.371036, 8.887277, 0.15322891, NEAR),
(3.6774938, 8.734048, 0.0, NEAR)
],
"sitting-pocket-upside-down": [
(7.3549876, -1.3790601, -6.2823853, NEAR),
(-7.3549876, -2.2984335, 6.2823853, NEAR)
],
"sitting-pocket-rightside-up": [
(-7.3549876, 0.91937345, -6.2823853, NEAR),
(7.8146744, 1.0726024, 6.129156, NEAR)
],
}
class OrientationClusterer:
"""Class for clustering accelerometer data and retrieving data"""
time_stamp_format = '%H:%M:%S %m/%d/%Y'
def __init__(self, gmm, cluster_on=[""]):
self.gmm = gmm
self.accelerometer_data = self.get_accelerometer_data()
self.predictions = None
# Format it in a numpy array
self.data_array = self.to_numpy()
self.is_fitted = False
self.is_predicted = False
@cache_dict("accelerometer_data.json")
def get_accelerometer_data(self):
# Sort the data
all_data = sorted(LogEntry.select(), key=lambda row: datetime.strptime(row.timestamp, self.time_stamp_format))
accelerometer_data = [{"timestamp": row.timestamp, "light": row.light_reading, "proximity": row.proximity_reading,
"x": row.x_reading, "y": row.y_reading, "z": row.z_reading} for row in all_data]
return accelerometer_data
def fit(self):
# Skip if already fitted
if self.is_fitted:
return
self.gmm.fit(self.data_array)
self.is_fitted = True
def to_numpy(self):
"""
Returns a numpy array of the log data to cluster on
"""
fields = [
"x", "y", "z",
"proximity"
]
return np.array([[row[field] for field in fields] for row in self.accelerometer_data])
def predict(self, data=None):
if data is None:
data = self.data_array
self.predictions = self.gmm.predict(data)
return self.predictions
def classify(self):
"""
Assign names to the clusters
Returns a dict to translate the cluster number to a name
"""
self.fit()
ids_to_names = {name: set() for name in REFERENCE_DATA}
for name, reference_points in REFERENCE_DATA.iteritems():
for point in reference_points:
prediction = self.predict([point])
ids_to_names[name] = ids_to_names[name].union(prediction)
return ids_to_names
| mit |
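A heavily hedged sketch of driving the clusterer above, added for illustration. It assumes the pre-0.20 `sklearn.mixture.GMM` API the file was written against, a populated `LogEntry` table or an existing `accelerometer_data.json` cache for `get_accelerometer_data`, and Python 2 (`iteritems`).

```python
from sklearn import mixture                     # old API with mixture.GMM
from OrientationClusterer import OrientationClusterer, REFERENCE_DATA

gmm = mixture.GMM(n_components=len(REFERENCE_DATA), covariance_type='full')
clusterer = OrientationClusterer(gmm)           # loads or caches accelerometer_data.json
clusterer.fit()                                 # fits the GMM on (x, y, z, proximity) rows
ids_to_names = clusterer.classify()             # {orientation name: set of cluster ids}
print ids_to_names
```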
iproduct/course-social-robotics | image-recognition-python-new/train_model.py | 3 | 1454 | # USAGE
# python train_model.py --embeddings output/embeddings.pickle \
# --recognizer output/recognizer.pickle --le output/le.pickle
# import the necessary packages
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import SVC
import argparse
import pickle
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-e", "--embeddings", required=True,
help="path to serialized db of facial embeddings")
ap.add_argument("-r", "--recognizer", required=True,
help="path to output model trained to recognize faces")
ap.add_argument("-l", "--le", required=True,
help="path to output label encoder")
args = vars(ap.parse_args())
# load the face embeddings
print("[INFO] loading face embeddings...")
data = pickle.loads(open(args["embeddings"], "rb").read())
# encode the labels
print("[INFO] encoding labels...")
le = LabelEncoder()
labels = le.fit_transform(data["names"])
# train the model used to accept the 128-d embeddings of the face and
# then produce the actual face recognition
print("[INFO] training model...")
recognizer = SVC(C=1.0, kernel="linear", probability=True)
recognizer.fit(data["embeddings"], labels)
# write the actual face recognition model to disk
f = open(args["recognizer"], "wb")
f.write(pickle.dumps(recognizer))
f.close()
# write the label encoder to disk
f = open(args["le"], "wb")
f.write(pickle.dumps(le))
f.close() | gpl-2.0 |
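A hedged sketch of consuming the artifacts the training script above writes, added for illustration. The paths follow the usage comment at the top of the file, the 128-dimensional embedding size comes from the script's own comment, and the random vector is only a stand-in for a real face embedding.

```python
import numpy as np
import pickle

# load the serialized SVM and label encoder written by train_model.py
recognizer = pickle.loads(open("output/recognizer.pickle", "rb").read())
le = pickle.loads(open("output/le.pickle", "rb").read())

embedding = np.random.rand(1, 128)              # stand-in for a real 128-d face embedding
probs = recognizer.predict_proba(embedding)[0]  # SVC was fit with probability=True
name = le.classes_[np.argmax(probs)]
print("best match: {} ({:.2f})".format(name, probs.max()))
```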
chengsoonong/digbeta | dchen/tour/src/inference.py | 2 | 14671 | import sys
import numpy as np
import pandas as pd
import heapq as hq
import itertools
import pulp
class HeapTerm: # an item in heapq (min-heap)
def __init__(self, priority, task):
self.priority = priority
self.task = task
self.string = str(priority) + ': ' + str(task)
def __lt__(self, other):
return self.priority < other.priority
def __repr__(self):
return self.string
def __str__(self):
return self.string
def do_inference_brute_force(ps, L, M, unary_params, pw_params, unary_features, pw_features,
y_true=None, y_true_list=None, debug=False, top=5):
"""
Inference using brute force search (for sanity check), could be:
- Train/prediction inference for single-label SSVM
- Train/prediction inference for multi-label SSVM
"""
assert(L > 1)
assert(L <= M)
assert(ps >= 0)
assert(ps < M)
assert(top > 0)
if y_true is not None:
assert(y_true_list is not None and type(y_true_list) == list)
if y_true is not None:
top = 1
Cu = np.zeros(M, dtype=np.float) # unary_param[p] x unary_features[p]
Cp = np.zeros((M, M), dtype=np.float) # pw_param[pi, pj] x pw_features[pi, pj]
# an intermediate POI should NOT be the start POI, NO self-loops
for pi in range(M):
Cu[pi] = np.dot(unary_params[pi, :], unary_features[pi, :]) # if pi != ps else -np.inf
for pj in range(M):
Cp[pi, pj] = -np.inf if (pj == ps or pi == pj) else np.dot(pw_params[pi, pj, :], pw_features[pi, pj, :])
Q = []
for x in itertools.permutations([p for p in range(M) if p != ps], int(L - 1)):
y = [ps] + list(x)
score = 0
if y_true is not None and np.any([np.all(np.array(y) == np.asarray(yj)) for yj in y_true_list]) is True:
continue
for j in range(1, L):
score += Cp[y[j - 1], y[j]] + Cu[y[j]]
if y_true is not None:
score += np.sum(np.asarray(y) != np.asarray(y_true))
if len(Q) < top:
hq.heappush(Q, HeapTerm(score, np.array(y)))
else:
hq.heappushpop(Q, HeapTerm(score, np.array(y))) # pop the smallest, then push
results = []
scores = []
while len(Q) > 0:
hterm = hq.heappop(Q)
results.append(hterm.task)
scores.append(hterm.priority)
# reverse the order: smallest -> largest => largest -> smallest
results.reverse()
scores.reverse()
if debug is True:
for score, y in zip(scores, results):
print(score, y)
if y_true is not None:
results = results[0]
return results
def do_inference_greedy(ps, L, M, unary_params, pw_params, unary_features, pw_features, y_true=None, y_true_list=None):
"""
Inference using greedy search (baseline), could be:
- Train/prediction inference for single-label SSVM
- Prediction inference for multi-label SSVM, no guaranteed for training
"""
assert(L > 1)
assert(L <= M)
assert(ps >= 0)
assert(ps < M)
if y_true is not None:
assert(y_true_list is not None and type(y_true_list) == list)
Cu = np.zeros(M, dtype=np.float) # unary_param[p] x unary_features[p]
Cp = np.zeros((M, M), dtype=np.float) # pw_param[pi, pj] x pw_features[pi, pj]
# an intermediate POI should NOT be the start POI, NO self-loops
for pi in range(M):
Cu[pi] = np.dot(unary_params[pi, :], unary_features[pi, :]) # if pi != ps else -np.inf
for pj in range(M):
Cp[pi, pj] = -np.inf if (pj == ps or pi == pj) else np.dot(pw_params[pi, pj, :], pw_features[pi, pj, :])
y_hat = [ps]
for t in range(1, L):
candidate_points = [p for p in range(M) if p not in y_hat]
p = y_hat[-1]
# maxix = np.argmax([Cp[p, p1] + Cu[p1] + float(p1 != y_true[t]) if y_true is not None else \
# Cp[p, p1] + Cu[p1] for p1 in candidate_points])
scores = [Cp[p, p1] + Cu[p1] + float(p1 != y_true[t]) if y_true is not None
else Cp[p, p1] + Cu[p1] for p1 in candidate_points]
indices = list(np.argsort(-np.asarray(scores)))
if t < L - 1 or y_true is None:
y_hat.append(candidate_points[indices[0]])
else:
for j in range(len(candidate_points)):
y = y_hat + [candidate_points[indices[j]]]
if not np.any([np.all(np.asarray(y) == np.asarray(yj)) for yj in y_true_list]):
y_hat.append(candidate_points[indices[j]])
break
if len(y_hat) < L:
sys.stderr.write('Greedy inference EQUALS (one of) ground truth, return ground truth\n')
y_hat.append(candidate_points[indices[-1]])
return [np.asarray(y_hat)]
def do_inference_viterbi_brute_force(ps, L, M, unary_params, pw_params, unary_features, pw_features):
"""
Heuristic to skip repeated POIs in predictions by Viterbi
"""
y_hat = do_inference_viterbi(ps, L, M, unary_params, pw_params, unary_features, pw_features)
pois = set(y_hat[0][1:])
Cu = np.zeros(M, dtype=np.float) # unary_param[p] x unary_features[p]
Cp = np.zeros((M, M), dtype=np.float) # pw_param[pi, pj] x pw_features[pi, pj]
# an intermediate POI should NOT be the start POI, NO self-loops
for pi in range(M):
Cu[pi] = np.dot(unary_params[pi, :], unary_features[pi, :]) # if pi != ps else -np.inf
for pj in range(M):
Cp[pi, pj] = -np.inf if (pj == ps or pi == pj) else np.dot(pw_params[pi, pj, :], pw_features[pi, pj, :])
y_best = None
best_score = -np.inf
for x in itertools.permutations(sorted(pois), len(pois)):
y = [ps] + list(x)
score = 0
for j in range(1, len(y)):
score += Cp[y[j - 1], y[j]] + Cu[y[j]]
if best_score < score:
best_score = score
y_best = y
assert(y_best is not None)
return [np.asarray(y_best)]
def do_inference_heuristic(ps, L, M, unary_params, pw_params, unary_features, pw_features):
"""
Heuristic to skip repeated POIs in predictions by Viterbi
"""
result = []
y_hat = do_inference_viterbi(ps, L, M, unary_params, pw_params, unary_features, pw_features)
for p in y_hat[0]:
if p not in result:
result.append(p)
return [np.asarray(result)]
def do_inference_viterbi(ps, L, M, unary_params, pw_params, unary_features, pw_features, y_true=None, y_true_list=None):
"""
Inference using the Viterbi algorithm, could be:
- Train/prediction inference for single-label SSVM
- Prediction inference for multi-label SSVM
"""
assert(L > 1)
assert(L <= M)
assert(ps >= 0)
assert(ps < M)
if y_true is not None:
assert(y_true_list is not None and type(y_true_list) == list)
assert(len(y_true_list) == 1)
Cu = np.zeros(M, dtype=np.float) # unary_param[p] x unary_features[p]
Cp = np.zeros((M, M), dtype=np.float) # pw_param[pi, pj] x pw_features[pi, pj]
# an intermediate POI should NOT be the start POI, NO self-loops
for pi in range(M):
Cu[pi] = np.dot(unary_params[pi, :], unary_features[pi, :]) # if pi != ps else -np.inf
for pj in range(M):
Cp[pi, pj] = -np.inf if (pj == ps or pi == pj) else np.dot(pw_params[pi, pj, :], pw_features[pi, pj, :])
A = np.zeros((L - 1, M), dtype=np.float) # scores matrix
B = np.ones((L - 1, M), dtype=np.int) * (-1) # backtracking pointers
for p in range(M): # ps--p
A[0, p] = Cp[ps, p] + Cu[p]
# if y_true is not None and p != ps: A[0, p] += float(p != y_true[1])/L # loss term: normalised
if y_true is not None and p != ps:
A[0, p] += float(p != y_true[1])
B[0, p] = ps
for t in range(0, L - 2):
for p in range(M):
# loss = float(p != y_true[l+2])/L if y_true is not None else 0 # loss term: normalised
loss = float(p != y_true[t + 2]) if y_true is not None else 0
scores = [A[t, p1] + Cp[p1, p] + Cu[p] for p1 in range(M)] # ps~~p1--p
maxix = np.argmax(scores)
A[t + 1, p] = scores[maxix] + loss
# B[l+1, p] = np.array(range(N))[maxix]
B[t + 1, p] = maxix
y_hat = [np.argmax(A[L - 2, :])]
p, t = y_hat[-1], L - 2
while t >= 0:
y_hat.append(B[t, p])
p, t = y_hat[-1], t - 1
y_hat.reverse()
return [np.asarray(y_hat)]
def do_inference_ILP_topk(ps, L, M, unary_params, pw_params, unary_features, pw_features, top=10, DIVERSITY=True):
if DIVERSITY is True:
results = []
good_results = []
while top > 0:
predicted = results if len(results) > 0 else None
y_hat = do_inference_ILP(ps, L, M, unary_params, pw_params, unary_features, pw_features,
predicted_list=predicted)
results.append(y_hat[0])
if len(good_results) == 0 or len(set(y_hat[0]) - set(good_results[-1])) > 0:
good_results.append(y_hat[0])
top -= 1
return good_results
else:
results = []
for k in range(top):
predicted = results if len(results) > 0 else None
y_hat = do_inference_ILP(ps, L, M, unary_params, pw_params, unary_features, pw_features,
predicted_list=predicted)
results.append(y_hat[0])
return results
def do_inference_ILP(ps, L, M, unary_params, pw_params, unary_features, pw_features, y_true=None,
y_true_list=None, predicted_list=None, n_threads=4, USE_GUROBI=True):
"""
Inference using integer linear programming (ILP), could be:
- Train/prediction inference for single-label SSVM (NOTE: NOT Hamming loss)
- Prediction inference for multi-label SSVM
"""
assert(L > 1)
assert(L <= M)
assert(ps >= 0)
assert(ps < M)
if y_true is not None:
assert(y_true_list is not None and type(y_true_list) == list)
assert(len(y_true_list) == 1)
assert(predicted_list is None)
if predicted_list is not None:
assert(y_true is None and y_true_list is None)
# when the parameters are very small, GUROBI will suffer from precision problems
# scaling parameters
unary_params = 1e6 * unary_params
pw_params = 1e6 * pw_params
p0 = str(ps)
pois = [str(p) for p in range(M)] # create a string list for each POI
pb = pulp.LpProblem('Inference_ILP', pulp.LpMaximize) # create problem
# visit_i_j = 1 means POI i and j are visited in sequence
visit_vars = pulp.LpVariable.dicts('visit', (pois, pois), 0, 1, pulp.LpInteger)
# isend_l = 1 means POI l is the END POI of trajectory
isend_vars = pulp.LpVariable.dicts('isend', pois, 0, 1, pulp.LpInteger)
# a dictionary contains all dummy variables
dummy_vars = pulp.LpVariable.dicts('u', [x for x in pois if x != p0], 2, M, pulp.LpInteger)
# add objective
objlist = []
for pi in pois: # from
for pj in pois: # to
objlist.append(visit_vars[pi][pj] * (np.dot(unary_params[int(pj)], unary_features[int(pj)]) +
np.dot(pw_params[int(pi), int(pj)], pw_features[int(pi), int(pj)])))
if y_true is not None: # Loss: normalised number of mispredicted POIs, Hamming loss is non-linear of 'visit'
objlist.append(1)
for j in range(M):
pj = pois[j]
for k in range(1, L):
pk = str(y_true[k])
# objlist.append(-1.0 * visit_vars[pj][pk] / L) # loss term: normalised
objlist.append(-1.0 * visit_vars[pj][pk])
pb += pulp.lpSum(objlist), 'Objective'
# add constraints, each constraint should be in ONE line
pb += pulp.lpSum([visit_vars[pi][pi] for pi in pois]) == 0, 'NoSelfLoops'
pb += pulp.lpSum([visit_vars[p0][pj] for pj in pois]) == 1, 'StartAt_p0'
pb += pulp.lpSum([visit_vars[pi][p0] for pi in pois]) == 0, 'NoIncoming_p0'
pb += pulp.lpSum([visit_vars[pi][pj] for pi in pois for pj in pois]) == L - 1, 'Length'
pb += pulp.lpSum([isend_vars[pi] for pi in pois]) == 1, 'OneEnd'
pb += isend_vars[p0] == 0, 'StartNotEnd'
for pk in [x for x in pois if x != p0]:
pb += pulp.lpSum([visit_vars[pi][pk] for pi in pois]) == isend_vars[pk] + \
pulp.lpSum([visit_vars[pk][pj] for pj in pois if pj != p0]), 'ConnectedAt_' + pk
pb += pulp.lpSum([visit_vars[pi][pk] for pi in pois]) <= 1, 'Enter_' + pk + '_AtMostOnce'
pb += pulp.lpSum([visit_vars[pk][pj] for pj in pois if pj != p0]) + isend_vars[pk] <= 1, \
'Leave_' + pk + '_AtMostOnce'
for pi in [x for x in pois if x != p0]:
for pj in [y for y in pois if y != p0]:
pb += dummy_vars[pi] - dummy_vars[pj] + 1 <= (M - 1) * (1 - visit_vars[pi][pj]), \
'SubTourElimination_' + pi + '_' + pj
# additional constraints/cuts to filtering out specified sequences
if predicted_list is not None:
for j in range(len(predicted_list)):
y = predicted_list[j]
pb += pulp.lpSum([visit_vars[str(y[k])][str(y[k + 1])] for k in range(len(y) - 1)]) <= (len(y) - 2), \
'exclude_%dth' % j
pb.writeLP("traj_tmp.lp")
# solve problem: solver should be available in PATH, default solver is CBC
if USE_GUROBI is True:
# gurobi_options = [('TimeLimit', '7200'), ('Threads', str(n_threads)), ('NodefileStart', '0.2'), ('Cuts', '2')]
gurobi_options = [('TimeLimit', '10800'), ('Threads', str(n_threads)), ('NodefileStart', '0.5')]
pb.solve(pulp.GUROBI_CMD(path='gurobi_cl', options=gurobi_options)) # GUROBI
else:
pb.solve(pulp.COIN_CMD(path='cbc', options=['-threads', str(n_threads), '-strategy', '1', '-maxIt', '2000000']))
visit_mat = pd.DataFrame(data=np.zeros((len(pois), len(pois)), dtype=np.float), index=pois, columns=pois)
isend_vec = pd.Series(data=np.zeros(len(pois), dtype=np.float), index=pois)
for pi in pois:
isend_vec.loc[pi] = isend_vars[pi].varValue
for pj in pois:
visit_mat.loc[pi, pj] = visit_vars[pi][pj].varValue
# build the recommended trajectory
recseq = [p0]
while True:
pi = recseq[-1]
pj = visit_mat.loc[pi].idxmax()
value = visit_mat.loc[pi, pj]
assert(int(round(value)) == 1)
recseq.append(pj)
if len(recseq) == L:
assert(int(round(isend_vec[pj])) == 1)
return [np.asarray([int(x) for x in recseq])]
| gpl-3.0 |
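A hedged smoke test for the inference routines above, added for illustration: random unary/pairwise parameters and features, a small POI set, and no ILP solver involved. It assumes `pulp` is installed (the module imports it at load time) and an older NumPy in which the `np.float`/`np.int` aliases used by the module still exist.

```python
import numpy as np
from inference import do_inference_viterbi, do_inference_greedy   # module shown above

np.random.seed(0)
M, L, ps, n_feat = 6, 4, 0, 5                  # POIs, trajectory length, start POI, feature dim
unary_params = np.random.rand(M, n_feat)
unary_features = np.random.rand(M, n_feat)
pw_params = np.random.rand(M, M, n_feat)
pw_features = np.random.rand(M, M, n_feat)

y_viterbi = do_inference_viterbi(ps, L, M, unary_params, pw_params, unary_features, pw_features)
y_greedy = do_inference_greedy(ps, L, M, unary_params, pw_params, unary_features, pw_features)
print(y_viterbi, y_greedy)                     # each is [array([0, ...])] of length L starting at ps
```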
maheshakya/scikit-learn | examples/ensemble/plot_gradient_boosting_quantile.py | 392 | 2114 | """
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)
# Observations
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)
alpha = 0.95
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
n_estimators=250, max_depth=3,
learning_rate=.1, min_samples_leaf=9,
min_samples_split=9)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_upper = clf.predict(xx)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_lower = clf.predict(xx)
clf.set_params(loss='ls')
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_pred = clf.predict(xx)
# Plot the function, the prediction and the 90% confidence interval based on
# the MSE
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
np.concatenate([y_upper, y_lower[::-1]]),
alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
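A small hedged follow-up to the example above, not part of the original scikit-learn example: refit just the two quantile models on the same kind of synthetic data and check how often the observations fall inside the predicted band.

```python
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor

np.random.seed(1)
f = lambda x: x * np.sin(x)
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
dy = 1.5 + 1.0 * np.random.random(X.shape[0])
y = f(X).ravel() + np.random.normal(0, dy)

alpha = 0.95
common = dict(loss='quantile', n_estimators=250, max_depth=3,
              learning_rate=.1, min_samples_leaf=9, min_samples_split=9)
upper = GradientBoostingRegressor(alpha=alpha, **common).fit(X, y)
lower = GradientBoostingRegressor(alpha=1.0 - alpha, **common).fit(X, y)

inside = (y >= lower.predict(X)) & (y <= upper.predict(X))
print("empirical coverage on the training data: %.2f" % inside.mean())  # loosely near the nominal 0.90
```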
MicrosoftGenomics/FaST-LMM | fastlmm/util/runner/Hadoop.py | 1 | 31716 | '''
Runs a distributable job on an Hadoop cluster. Its run method return 'None'
See SamplePi.py for examples.
'''
import logging
from fastlmm.util.runner import *
import os
import cPickle as pickle
import subprocess, sys, os.path
import multiprocessing
import fastlmm.util.util as util
import pdb
from collections import defaultdict
import tarfile as tarfileLibrary
import ctypes
import datetime
from tempfile import TemporaryFile
class Hadoop: # implements IRunner
fileshare = "/user"
def __init__(self, taskcount, mapmemory=-1, reducememory=-1, mkl_num_threads=None,queue="default",skipsourcecheck=False,skipdatacheck=False,logging_handler=logging.StreamHandler(sys.stdout)):
logger = logging.getLogger()
if not logger.handlers:
logger.setLevel(logging.INFO)
for h in list(logger.handlers):
logger.removeHandler(h)
logger.addHandler(logging_handler)
if logger.level == logging.NOTSET:
logger.setLevel(logging.INFO)
logging.info('constructing Hadoop runner')
self.taskcount = taskcount
self.mapmemory = mapmemory
self.reducememory = reducememory
self.mkl_num_threads = mkl_num_threads
self.queue = queue
self.skipsourcecheck = skipsourcecheck
self.skipdatacheck = skipdatacheck
logging.info('done constructing Hadoop runner')
def run(self, distributable):
logging.info('Hadoop runner is running a distributable')
# Check that the local machine has python path set
localpythonpath = os.environ.get("PYTHONPATH") #!!should it be able to work without pythonpath being set (e.g. if there was just one file)? Also, is None really the return or is it an exception.
if localpythonpath == None: raise Exception("Expect local machine to have 'pythonpath' set")
remotewd, run_dir_abs, run_dir_rel = self.create_run_dir()
result_local = os.path.join(run_dir_rel,"result.p")
result_hdfs = "hdfs:" + os.path.join(run_dir_abs,"result.p").replace("\\","/")
result_remote = os.path.join(distributable.tempdirectory,"result.p")
fileInWorkingDirectoryList =[]
tgzListPythonSettings = self.FindOrCreatePythonSettings(remotewd)
remotepythonpath, tgzListPythonPath = self.FindOrCreateRemotePythonPath(localpythonpath, remotewd)
tgzList = []
inputOutputCopier = HadoopCopier(remotewd, fileInWorkingDirectoryList, tgzList, self.skipdatacheck) #Create the object that copies input and output files to where they are needed
inputOutputCopier.input(distributable) # copy the input files to where they are needed (i.e. the cluster)
batfilename_abs_list = self.create_bat_file(distributable, remotepythonpath, remotewd, run_dir_abs, run_dir_rel, result_remote, result_hdfs)
self.submit_to_cluster(batfilename_abs_list, fileInWorkingDirectoryList, tgzList, tgzListPythonPath, tgzListPythonSettings, distributable, remotewd, run_dir_abs, run_dir_rel)
inputOutputCopier.output(distributable) # copy the output file from where they were created (i.e. the cluster) to the local computer
subprocess.check_output("%HADOOP_HOME%\\bin\\hadoop fs -copyToLocal {0} {1}\n".format(result_hdfs, result_local),stderr=subprocess.STDOUT,shell=True)
with open(result_local, mode='rb') as f:
result = pickle.load(f)
#logging.info('Done: Hadoop runner is running a distributable. Returns {0}'.format(result))
return result
def submit_to_cluster(self, batfilename_rel_list, fileInWorkingDirectoryList, tgzList, tgzListPythonPath, tgzListPythonSettings, distributable, remotewd, run_dir_abs, run_dir_rel):
logging.info('Hadoop runner is submitting to cluster')
#!! e.g. hdfs://rr1-n13-02-c02/user/carlk/inputs.tgz#inputs,hdfs://rr1-n13-02-c02/user/carlk/datasets.tgz#datasets,hdfs://rr1-n13-02-c02/user/carlk/src.tgz#src
#!! could do this functionally
archivesStringList = []
for tgz in tgzList:
archiveString = "hdfs:{0}#{1}".format(tgz[1],os.path.splitext(tgz[0])[0])
archivesStringList.append(archiveString)
archivesStringList.append(tgzListPythonSettings)
for tgz in tgzListPythonPath:
archivesStringList.append(tgz)
#e.g. distMapper.bat,distReducer.bat
filesString = ",".join(batfilename_rel_list+fileInWorkingDirectoryList)
taskIndexDir = run_dir_rel + os.path.sep + "input"
util.create_directory_if_necessary(taskIndexDir,isfile=False)
#zgoal = int(SP.ceil(SP.log(self.taskcount)/SP.log(10)))
with open(taskIndexDir + os.path.sep + "taskIndexList.txt","w") as taskIndexListFile:
for taskIndex in xrange(self.taskcount):
taskIndexListFile.write("{0}\n".format(taskIndex)) # str(taskIndex).zfill(zgoal)))
#hadoop fs -rmr runs/2013-08-02_13_51_42
#hadoop fs -copyFromLocal runs\2013-08-02_13_51_42 runs/2013-08-02_13_51_42
#hadoop jar %HADOOP_HOME%\lib\hadoop-streaming.jar ^
# -archives "hdfs:/user/carlk/source/carlkextranet05312013/ERG01/src/tests/datasets.2013-07-31_11_12_11.tgz#datasets,hdfs:/user/carlk/runs/pythonpath.0.src.2013-07-31_14_30_56/src.tgz#pythonpath.0.src" ^
# -files "hdfs:/user/carlk/runs/2013-08-02_13_51_42/distMapper.bat,hdfs:/user/carlk/runs/2013-08-02_13_51_42/distReducer.bat,hdfs:/user/carlk/runs/2013-08-02_13_51_42/distributable.p" ^
# -input "runs/2013-08-02_13_51_42/input" ^
# -output "runs/2013-08-02_13_51_42/output" ^
# -mapper "distMapper.bat" ^
# -reducer "distReducer.bat"
#hadoop fs -cat runs/2013-08-02_13_51_42/output/part-00000 | more
s00 = r"%HADOOP_HOME%\bin\hadoop fs -rmr -skipTrash {0}".format(run_dir_rel.replace("\\","/"))
s0 = r"%HADOOP_HOME%\bin\hadoop fs -copyFromLocal {0} {1}".format(run_dir_rel, run_dir_rel.replace("\\","/"))
#-D mapreduce.reduce.shuffle.connect.timeout=3600000 ^
#-D io.sort.mb=1400 ^
#-D job.end.retry.interval=3600000 ^
#-D mapred.tasktracker.expiry.interval=3600000 ^
logging.info("running {0}".format(str(distributable)))
s = r"""%HADOOP_HOME%\bin\hadoop jar %HADOOP_HOME%\lib\hadoop-streaming.jar ^
-archives "{0}" ^
-files "{1}" ^
-D mapred.job.name="{8}" ^
-D mapred.map.tasks={4} ^
-D mapred.reduce.tasks=1 ^
-D mapred.job.map.memory.mb={5} ^
-D mapred.job.reduce.memory.mb={6} ^
-D mapred.task.timeout={7} ^
-D mapred.job.queue.name="{9}" ^
-input {2} ^
-output {3} ^
-inputformat org.apache.hadoop.mapred.lib.NLineInputFormat ^
-mapper "distMapper.bat" ^
-reducer "distReducer.bat"
""".format(
",".join(archivesStringList), #0
filesString, #1
taskIndexDir.replace("\\","/"), #2
(run_dir_rel + os.path.sep + "output").replace("\\","/"), #3
self.taskcount, #4
self.mapmemory, #5
self.reducememory, #6
0, #7
str(distributable), #8
self.queue #9
)
runHadoopFileName = run_dir_rel + os.path.sep + "runHadoop.bat"
logging.info("Hadoop runner is creating '{0}'".format(runHadoopFileName))
with open(runHadoopFileName, "w") as runHadoopFile:
runHadoopFile.write("call {0}\n".format(s00))
runHadoopFile.write("call {0}\n".format(s0))
runHadoopFile.write("call {0}\n".format(s))
sOneLine = "".join(s.split("^\n"))
logging.info("Hadoop runner running the copyFromLocal")
with TemporaryFile() as output:
stdout0 = subprocess.check_output(s0,stderr=output,shell=True)
output.seek(0)
stderr0 = output.read()
logging.info("Result from 'Hadoop runner running the copyFromLocal' is stdout='{0}', stderr='{1}'".format(stdout0, stderr0))
if stderr0 != "" : raise Exception("Stderr from command: '{0}'".format(stderr0))
logging.info("Hadoop runner running the streamingjar")
with TemporaryFile() as output:
stdout = subprocess.check_output(sOneLine,stderr=output,shell=True)
output.seek(0)
stderr = output.read()
logging.info("Result from 'Hadoop runner running the streamingjar' is stdout='{0}', stderr='{1}'".format(stdout, stderr))
logging.info('Done: Hadoop runner is submitting to cluster')
#if stderr != "" : raise Exception("Stderr from command: '{0}'".format(stderr))
def FindOrCreatePythonSettings(self, remotewd):
localpythonpathsetting = r"\\msr-arrays\scratch\msr-pool\eScience3\.continuum" # os.path.join(os.environ.get("userprofile"),".continuum")
lastFolderName = os.path.split(os.path.normpath(localpythonpathsetting))[1]
#util.create_directory_if_necessary(localpythonpathsetting,isfile=False)
#Must set assume_changed=True for otherwise hidden .continuum file to be used.
tgzName = HadoopCopier.CheckUpdateTgz(localpythonpathsetting, subsubItemList1=None, skipcheck=False, filter_hidden=False)
hdfstgz = "hdfs:{3}/{2}.{1}/{2}.tgz".format(None,str(datetime.datetime.fromtimestamp(os.path.getmtime(tgzName)))[:19].replace(" ","_").replace(":","_"),lastFolderName,remotewd)
Hadoop.hdfsCopyFromLocalIfNotThere(tgzName, hdfstgz)
return hdfstgz + "#" + lastFolderName
def create_distributablep(self, distributable, run_dir_abs, run_dir_rel):
logging.info('Hadoop runner is pickling distributable')
distributablep_filename_rel = os.path.join(run_dir_rel, "distributable.p")
#distributablep_filename_abs = os.path.join(run_dir_abs, "distributable.p")
util.create_directory_if_necessary(distributablep_filename_rel)
with open(distributablep_filename_rel, mode='wb') as f:
pickle.dump(distributable, f, pickle.HIGHEST_PROTOCOL)
logging.info('Done: Hadoop runner is pickling distributable')
return distributablep_filename_rel
def create_bat_file(self, distributable, remotepythonpath, remotewd, run_dir_abs, run_dir_rel, result_remote, result_hdfs):
logging.info('Hadoop runner is creating bat file')
outFileList = Hadoop.RecursivelyGetAllOutputs(distributable)
distributablep_filename_rel = self.create_distributablep(distributable, run_dir_abs, run_dir_rel)
distributable_py_file = os.path.join(os.path.dirname(__file__),"..","distributable.py")
if not os.path.exists(distributable_py_file): raise Exception("Expect file at " + distributable_py_file + ", but it doesn't exist.")
localfilepath, file = os.path.split(distributable_py_file)
remoteexepath = os.path.join(remotepythonpath.split(';')[0],"fastlmm","util") #!!shouldn't need to assume where the file is in source
batfilename_abs_list = []
for part in ["Mapper","Reducer"]:
command_string = remoteexepath + os.path.sep + file + r""" distributable.p "Local{0}({1},""{2}"",mkl_num_threads={3},logging_handler=logging.StreamHandler())" """.format(
part,
self.taskcount,
result_remote.replace("\\","/"), #change to DOS separator to Unix separator because python will work with either and this avoid problems with parsing the batch file
self.mkl_num_threads)
batfilename_rel = os.path.join(run_dir_rel,"dist{0}.bat".format(part))
batfilename_abs = "hdfs:" + os.path.join(run_dir_abs,"dist{0}.bat".format(part)).replace("\\","/")
batfilename_abs_list.append(batfilename_abs)
util.create_directory_if_necessary(batfilename_rel, isfile=True)
with open(batfilename_rel, "w") as batfile:
batfile.write("@set path={0};{0}\Scripts;%path%\n".format(r"c:\GCD\esciencepy"))
batfile.write("@set PYTHONPATH={0}\n".format(remotepythonpath))
batfile.write("@set home=%cd%\n")
#batfile.write("@mklink /d .continuum continuum\n")
#batfile.write("@dir /s\n")
#batfile.write("@set R_HOME={0}\n".format(os.path.join(remotepythoninstall,"R-2.15.2")))
#batfile.write("@set R_USER={0}\n".format("."))
batfile.write("@mkdir {0}\n@mkdir {0}\\tex.cache\n@set MPLCONFIGDIR={0}\n".format(".matplotlib"))
batfile.write("@mkdir {0}\nset IPYTHONDIR={0}\n".format(".ipython"))
#batfile.write("xcopy /d /e /s /c /h /i continuum .continuum\n")
batfile.write("@call python {0}\n".format(command_string))
if part == "Reducer":
batfile.write("@call %HADOOP_HOME%\\bin\\hadoop fs -rm {0} -skipTrash\n".format(result_hdfs))
batfile.write("@call %HADOOP_HOME%\\bin\\hadoop fs -copyFromLocal {0} {1}\n".format(result_remote, result_hdfs))
for outfile in outFileList:
hdfsOutFile = remotewd + "/" + outfile
batfile.write("@call %HADOOP_HOME%\\bin\\hadoop fs -rm {0}\n".format(hdfsOutFile))
batfile.write("@call %HADOOP_HOME%\\bin\\hadoop fs -copyFromLocal {0} {1}\n".format(outfile, hdfsOutFile))
picklefilename_abs = "hdfs:" + os.path.join(run_dir_abs,"distributable.p").replace("\\","/")
batfilename_abs_list.append(picklefilename_abs)
logging.info('Done: Hadoop runner is creating bat file')
return batfilename_abs_list
@staticmethod
def RecursivelyGetAllOutputs(item):
outputList = []
ListCopier([],outputList).output(item)
return outputList
def FindOrCreateRemotePythonPath(self, localpythonpath, remotewd):
#input: something like: 'D:\\Source\\carlkextranet05312013\\ERG01\\src\\' and maybe a second item, etc
#sideeffect: create D:\\Source\\carlkextranet05312013\\ERG01\\src.tgz and 2nd item
#sideeffect: copy if newer to to hdfs /user/carlk/runs/pythonpath.src.0.<moddate>/src.tgz and ....1.....2nditem
#return 1 list of "hdfs /user/carlk/runs/pythonpath.0.<moddate>/src.tgz#pythonpath.0.src"
# 2 remotepythonpath, e.g. "pythonpath.0.src;pythonpath.1.2nditem"
remotepythonpath_list = []
tgzList = []
for i, localpythonpathdir in enumerate(localpythonpath.split(';')):
tgzName = HadoopCopier.CheckUpdateTgz(localpythonpathdir,skipcheck=self.skipsourcecheck)
lastFolderName = os.path.split(os.path.normpath(localpythonpathdir))[1]
hdfstgz = "hdfs:{3}/pythonpath.{0}.{2}.{1}/{2}.tgz".format(i,str(datetime.datetime.fromtimestamp(os.path.getmtime(tgzName)))[:19].replace(" ","_").replace(":","_"),lastFolderName,remotewd)
Hadoop.hdfsCopyFromLocalIfNotThere(tgzName, hdfstgz)
remotepythonpathdir = "pythonpath.{0}.{1}".format(i,lastFolderName)
remotepythonpath_list.append(remotepythonpathdir)
tgzList.append(hdfstgz + "#" + remotepythonpathdir)
remotepythonpath = ";".join(remotepythonpath_list)
#these are parallel
return remotepythonpath, tgzList
@staticmethod
def hdfsCopyFromLocalIfNotThere(tgzName, hdfstgz):
# if it is there won't copy. If it isn't there will copy.
subprocess.check_output(r"type {0} | %HADOOP_HOME%\bin\Hadoop fs -put - {1}".format(tgzName, hdfstgz[5:]),stderr=subprocess.STDOUT,shell=True)
def create_run_dir(self):
username = os.environ["USERNAME"]
localwd = os.getcwd()
if localwd.startswith("\\\\"):
remotewd = self.fileshare + os.path.sep + username + os.path.sep + "\\".join(localwd.split('\\')[4:])
else:
remotewd = self.fileshare + os.path.sep + username + os.path.splitdrive(localwd)[1] #using '+' because 'os.path.join' doesn't work with shares
remotewd = remotewd.replace("\\","/")
if remotewd.endswith("/"): # remove trailing /
remotewd = remotewd[:-1]
run_dir_rel = os.path.join("runs",util.datestamp(appendrandom=True))
util.create_directory_if_necessary("runs",isfile=False)
if not os.path.isfile(".ignoreTgzChange"):
with open("runs" + os.path.sep + ".ignoreTgzChange","w") as ignoreFile:
ignoreFile.write("\n")
run_dir_abs = "/user/{0}/{1}".format(username,run_dir_rel)
#!! hadoop_create_directory_if_necessary(run_dir_abs,isfile=False)
return remotewd, run_dir_abs, run_dir_rel
#!! move these hadoop commands to a library
@staticmethod
def hadoop_create_directory_if_necessary(name, isfile=True):
import os
if isfile:
directory_name = os.path.dirname(name)
else:
directory_name = name
hadoop_makedirs(directory_name)
#!! what if already is there?
@staticmethod
def hadoop_makedirs(directory_name):
hadoop_command("fs -mkdir {0}".format(directory_name))
@staticmethod
def hadoop_command(command_string):
rc = os.system(os.environ.get("HADOOP_HOME") + os.path.sep + "bin" + os.path.sep + "hadoop " + command_string)
if rc != 0: raise Exception("Hadoop command '{0}' fails with rc={1}.".format(command_string,rc))
class ListCopier(object): #Implements ICopier
def __init__(self, inputList, outputList): #The list will be modified
if len(inputList) != 0 : raise Exception("Expect inputList to start empty")
if len(outputList) != 0 : raise Exception("Expect outputList to start empty")
self.inputList = inputList
self.outputList = outputList
def input(self,item):
if isinstance(item, str):
self.inputList.append(item)
elif hasattr(item,"copyinputs"):
item.copyinputs(self)
#else do nothing
pass # ignore
def output(self,item):
if isinstance(item, str):
self.outputList.append(item)
elif hasattr(item,"copyoutputs"):
item.copyoutputs(self)
#else do nothing
pass # ignore
class HadoopCopier(object): #Implements ICopier
def __init__(self, remotewd, fileInWorkingDirectoryList, tgzList, skipdatacheck): #The two lists will be modified
if len(fileInWorkingDirectoryList) != 0 : raise Exception("Expect fileInWorkingDirectoryList to start empty")
if len(tgzList) != 0 : raise Exception("Expect tgzList to start empty")
self.remotewd = remotewd
self.fileInWorkingDirectoryList = fileInWorkingDirectoryList
self.tgzList = tgzList
self.skipdatacheck = skipdatacheck
def input(self,item):
inputList = self.RecursivelyGetAllInputs(item)
#group by subfolder, treating files in the working directory as special and giving an error with higher levels
fileInWorkingDirectoryList, subDirectoryToSubSubItemList = self.GroupByTopLevelSubFolder(inputList)
#create or update a tgz for each directory
for directory,subsubItemList in subDirectoryToSubSubItemList.iteritems():
tgzName = HadoopCopier.CheckUpdateTgz(directory, subsubItemList1=subsubItemList, skipcheck=self.skipdatacheck)
hdfsName = "{0}/{1}.{2}.tgz".format(self.remotewd,directory,str(datetime.datetime.fromtimestamp(os.path.getmtime(tgzName)))[:19].replace(" ","_").replace(":","_")).replace("\\","/")
self.tgzList.append((tgzName,hdfsName))
hdfsNameWild = "{0}/{1}.{2}.tgz".format(self.remotewd,directory,'*').replace("\\","/")
lookout = subprocess.check_output(r"%HADOOP_HOME%\bin\Hadoop fs -ls {0}".format(hdfsName),stderr=subprocess.STDOUT,shell=True)
if "No such file or directory" in lookout:
subprocess.check_output(r"%HADOOP_HOME%\bin\Hadoop fs -rm {0}".format(hdfsNameWild),stderr=subprocess.STDOUT,shell=True)
subprocess.check_output(r"%HADOOP_HOME%\bin\Hadoop fs -copyFromLocal {0} {1}".format(tgzName, hdfsName),stderr=subprocess.STDOUT,shell=True)
for file in fileInWorkingDirectoryList:
if os.path.getsize(file) > 10 * 1024 * 1024:
logging.warn("File '{0}' is large ({1}GB) and would be more efficiently distributed on Hadoop if placed in subfolder of the working directory.".format(file, round(os.path.getsize(file)/(1024*1024))))
self.fileInWorkingDirectoryList.append(file)
# filesInWorkingDirectory copy to hdfs
@staticmethod
def CheckUpdateTgz(directory0, subsubItemList1 = None, skipcheck=False, filter_hidden=True):
if subsubItemList1 == None:
subsubItemList1 = [[]]
directory = os.path.normpath(directory0)
tgzName = directory + ".tgz"
if not skipcheck and os.path.exists(tgzName) and not [] in subsubItemList1 and not filter_hidden:
if not os.path.isfile(tgzName): raise Exception("Expect '{0}' to be a file.".format(tgzName))
logging.info("Making list of any files already in {0}".format(tgzName))
tarfile = tarfileLibrary.open(tgzName, "r")
for tarinfo in tarfile.getmembers():
if tarinfo.isfile():
filenamein = tarinfo.filename.replace("/",os.path.sep) #"/" is the tar separator
filename = os.path.join(directory,filenamein)
if os.path.exists(filename) and os.path.isfile(filename):
subsubItemList1.append(filenamein.split("/")) #"/" is the tar separator
tarfile.close()
subsubItemListOut = HadoopCopier.RemoveRedundant(subsubItemList1)
if not HadoopCopier.IsTarFileUpToDate(directory, subsubItemListOut, tgzName, filter_hidden):
HadoopCopier.CreateNewTarFile(directory, subsubItemListOut, tgzName, filter_hidden)
return tgzName
@staticmethod
def CreateNewTarFile(directory, subsubItemList, tgzName, filter_hidden=True):
# logging.info("{0}, {1}, {2}".format(directory, subsubItemList, tgzName))
directory1 = os.path.normpath(directory)
tgzNameTemp = tgzName + ".tar" #create a temp file so that both files will exist for a while
logging.info("Creating '{0}'".format(tgzNameTemp))
tarfile = tarfileLibrary.open(tgzNameTemp,"w:")
for subsubItem in subsubItemList:
tarName = "/".join(subsubItem) # tar files use "/" not os.path.sep
tarfile.add(os.path.normpath(directory + "/" + tarName), tarName, recursive=True, filter=lambda x, directory1=directory1,filter_hidden=filter_hidden : HadoopCopier.tarfile_filter_hidden(x,directory1,filter_hidden))
tarfile.close()
logging.info("Compressing '{0}'".format(tgzNameTemp))
subprocess.call(r"c:\cygwin64\bin\gzip.exe --force --fast {0}".format(tgzNameTemp), shell=True)
logging.info("Finished Compressing '{0}'".format(tgzNameTemp))
if os.path.exists(tgzName):
os.remove(tgzName)
os.rename(tgzNameTemp+".gz",tgzName)
@staticmethod
def RemoveRedundant(subsubItemList1):
subsubItemListOut = []
for index1,subsubItem1 in enumerate(subsubItemList1):
if HadoopCopier.shouldinclude(index1,subsubItem1,subsubItemList1):
subsubItemListOut.append(subsubItem1)
return subsubItemListOut
@staticmethod
def shouldinclude(index1,subsubItem1,subsubItemList1):
for index2,subsubItem2 in enumerate(subsubItemList1):
if HadoopCopier.knocksout(index1,subsubItem1,index2,subsubItem2):
return False
return True
@staticmethod
def knocksout(index1,subsubItem1,index2,subsubItem2):
if index1 == index2:
return False
if len(subsubItem1) == len(subsubItem2) and index1 > index2:
return False
if len(subsubItem1) >= len(subsubItem2):
for i in xrange(len(subsubItem2)):
if subsubItem1[i] != subsubItem2[i]:
return False
return True
return False
def RecursivelyGetAllInputs(self, item):
inputList = []
ListCopier(inputList,[]).input(item)
return inputList
def GroupByTopLevelSubFolder(self, inputList):
filesInWorkingDirectory = []
subDirectoryToSubSubItemList = defaultdict(list)
for item in inputList:
normpath = os.path.normpath(item)
try:
relpath = os.path.relpath(normpath)
except :
raise Exception("for Hadoop input files must be in or below the current directory ('{0}' is not)".format(item))
if not os.path.exists(relpath): raise Exception("Expect input file (or directory) '{0}' but it doesn't exist".format(relpath))
parts = relpath.split(os.path.sep)
if os.path.isfile(relpath): #If the input item is a file ...
if len(parts) == 1: #If the input item is in the working directory
filesInWorkingDirectory.append(relpath) #add it to the filesInWorkingDirectory list
else: #A file in a subfolder
subDirectoryToSubSubItemList[parts[0]].append(parts[1:]) #Add it to the list for the subfolder
else: #A folder
if not os.path.isdir(relpath): raise Exception("assert")
if len(parts) == 1:
subDirectoryToSubSubItemList[relpath].append([])
else:
subDirectoryToSubSubItemList[parts[0]].append(parts[1:])
return filesInWorkingDirectory, subDirectoryToSubSubItemList
@staticmethod
def IsTarFileUpToDate(directory, subsubItemList, tgzName, filter_hidden):
if os.path.exists(tgzName):
logging.info("Checking if '{0}' is up to date".format(tgzName))
if not os.path.isfile(tgzName): raise Exception("Expect '{0}' to be a file.".format(tgzName))
tarfile = tarfileLibrary.open(tgzName, "r")
isUpToDate = HadoopCopier.IsTarFileUpToDateInternal(directory, subsubItemList, tarfile, filter_hidden)
tarfile.close()
logging.info("'{0}' up to date? {1} ".format(tgzName, isUpToDate))
return isUpToDate
else:
return False
@staticmethod
def IsTarFileUpToDateInternal(directory, subsubItemList, tgzFile, filter_hidden):
howToIgnoreString = "To ignore changes in a directory add a file '{0}'".format(HadoopCopier._ignoreTgzChangeFileName)
for subsubItem in subsubItemList:
tarName = "/".join(subsubItem) # use "/" instead of os.path.sep because that is what the tar file uses
winfileOrDirectory = os.path.normpath(directory + os.path.sep + tarName)
try:
member = tgzFile.getmember(tarName)
except Exception, e:
logging.info("'{0}' not up to date because of exception {1}. ({2})".format(tarName, e, howToIgnoreString))
return False;
else:
if os.path.isfile(winfileOrDirectory):
if int(os.path.getmtime(winfileOrDirectory)) != int(member.mtime):
logging.info("'{0}' not up to date because of date change in '{1}'. ({2})".format(tgzFile, winfileOrDirectory, howToIgnoreString))
return False;
else:
if not os.path.isdir(winfileOrDirectory): raise Exception("Expect '{0}' to be a directory. ({1})".format(winfileOrDirectory, howToIgnoreString))
for winfileOrDirectory2 in HadoopCopier.WalkWithoutHidden(winfileOrDirectory, filter_hidden):
tarName2 = os.path.relpath(winfileOrDirectory2, winfileOrDirectory).replace('\\','/')
try:
member = tgzFile.getmember(tarName2)
except KeyError:
logging.info("'{0}' not up to date because '{1}' is not found. ({2})".format(tgzFile, tarName2, howToIgnoreString))
return False;
else:
if os.path.isfile(winfileOrDirectory2):
if int(os.path.getmtime(winfileOrDirectory2)) != int(member.mtime):
logging.info("'{0}' not up to date because of date change in '{1}'. ({2})".format(tgzFile, winfileOrDirectory2, howToIgnoreString))
return False;
return True
@staticmethod
def WalkWithoutHidden(directory, filter_hidden):
if not HadoopCopier.is_hidden(directory, filter_hidden):
for sub in os.listdir(directory):
subfull = os.path.join(directory,sub)
if os.path.isfile(subfull):
if not HadoopCopier.is_hidden(subfull, filter_hidden):
yield subfull
else:
for file in HadoopCopier.WalkWithoutHidden(subfull, filter_hidden):
yield file
@staticmethod
def dont_filter(tarinfo, directory1):
filenamein = tarinfo.name.replace("/",os.path.sep) #"/" is the tar separator
filename = os.path.join(directory1,filenamein)
logging.info("Adding file to tar '{0}'".format(filename))
return tarinfo
@staticmethod
def tarfile_filter_hidden(tarinfo, directory1, filter_hidden):
filenamein = tarinfo.name.replace("/",os.path.sep) #"/" is the tar separator
filename = os.path.join(directory1,filenamein)
if HadoopCopier.is_hidden(filename, filter_hidden):
#logging.info("skipping '{0}'".format(filenamein))
return None
else:
logging.info("Adding file to tar '{0}'".format(filename))
return tarinfo
@staticmethod
def is_hidden(filepath, filter_hidden):
if not filter_hidden:
return False
else:
name = os.path.basename(os.path.abspath(filepath))
return name.startswith('.') or HadoopCopier.has_hidden_attribute(filepath) or HadoopCopier.containsDotIgnoreTgzChange(filepath)
_ignoreTgzChangeFileName = ".ignoreTgzChange"
#!! test that this stops it from look down below
@staticmethod
def containsDotIgnoreTgzChange(filepath):
signalPath = os.path.join(filepath,HadoopCopier._ignoreTgzChangeFileName)
return os.path.exists(signalPath)
@staticmethod
def has_hidden_attribute(filepath):
try:
attrs = ctypes.windll.kernel32.GetFileAttributesW(unicode(filepath))
assert attrs != -1
result = bool(attrs & 2)
except (AttributeError, AssertionError):
result = False
return result
def output(self,item):
outFileList = Hadoop.RecursivelyGetAllOutputs(item)
for outfile in outFileList:
if os.path.exists(outfile):
os.remove(outfile)
hdfsOutFile = self.remotewd + "/" + outfile
subprocess.check_output("%HADOOP_HOME%\\bin\\hadoop fs -copyToLocal {0} {1}\n".format(hdfsOutFile, outfile),stderr=subprocess.STDOUT,shell=True)
| apache-2.0 |
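A heavily hedged sketch of constructing the runner above, added for illustration. The argument values are illustrative, the `distributable` object is an assumption (anything implementing FaST-LMM's distributable protocol with copyinputs/copyoutputs and a tempdirectory), and the surrounding environment (Windows paths, `HADOOP_HOME`, a Python 2 interpreter) is the one the module hard-codes.

```python
import logging
import sys
from fastlmm.util.runner import Hadoop        # the runner shown above

runner = Hadoop(taskcount=100,                # number of map tasks
                mapmemory=8 * 1024,           # MB per map task
                reducememory=8 * 1024,        # MB for the single reduce task
                mkl_num_threads=1,
                queue="default",
                logging_handler=logging.StreamHandler(sys.stdout))

# 'distributable' would be a work-describing object (an assumption here,
# so the call is left commented out):
# result = runner.run(distributable)
```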
karoldvl/EARS | ears/train.py | 2 | 5252 | # -*- coding: utf-8 -*-
import json
import logging
import os
import time
import warnings
import librosa
import numpy as np
import pandas as pd
import pydub
import sklearn.preprocessing
from tqdm import tqdm
THEANO_FLAGS = ('device=gpu0,'
'floatX=float32,'
'dnn.conv.algo_bwd_filter=deterministic,'
'dnn.conv.algo_bwd_data=deterministic')
os.environ['THEANO_FLAGS'] = THEANO_FLAGS
os.environ['KERAS_BACKEND'] = 'theano'
import keras
keras.backend.set_image_dim_ordering('th')
from keras.layers.convolutional import Conv2D as Conv
from keras.layers.convolutional import MaxPooling2D as Pool
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.core import Activation, Dense, Dropout, Flatten
from keras.regularizers import l2 as L2
from config import *
def to_one_hot(targets, class_count):
"""Encode target classes in a one-hot matrix.
"""
one_hot_enc = np.zeros((len(targets), class_count))
for r in range(len(targets)):
one_hot_enc[r, targets[r]] = 1
return one_hot_enc
def extract_segment(filename):
"""Get one random segment from a recording.
"""
spec = np.load('dataset/tmp/' + filename + '.spec.npy').astype('float32')
offset = np.random.randint(0, np.shape(spec)[1] - SEGMENT_LENGTH + 1)
spec = spec[:, offset:offset + SEGMENT_LENGTH]
return np.stack([spec])
def iterrows(dataframe):
"""Iterate over a random permutation of dataframe rows.
"""
while True:
for row in dataframe.iloc[np.random.permutation(len(dataframe))].itertuples():
yield row
def iterbatches(batch_size, training_dataframe):
"""Generate training batches.
"""
itrain = iterrows(training_dataframe)
while True:
X, y = [], []
for i in range(batch_size):
row = next(itrain)
X.append(extract_segment(row.filename))
y.append(le.transform([row.category])[0])
X = np.stack(X)
y = to_one_hot(np.array(y), len(labels))
X -= AUDIO_MEAN
X /= AUDIO_STD
yield X, y
if __name__ == '__main__':
np.random.seed(1)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
# Load dataset
meta = pd.read_csv('dataset/dataset.csv')
labels = pd.unique(meta.sort_values('category')['category'])
le = sklearn.preprocessing.LabelEncoder()
le.fit(labels)
# Generate spectrograms
logger.info('Generating spectrograms...')
if not os.path.exists('dataset/tmp/'):
os.mkdir('dataset/tmp/')
for row in tqdm(meta.itertuples(), total=len(meta)):
spec_file = 'dataset/tmp/' + row.filename + '.spec.npy'
audio_file = 'dataset/audio/' + row.filename
if os.path.exists(spec_file):
continue
audio = pydub.AudioSegment.from_file(audio_file).set_frame_rate(SAMPLING_RATE).set_channels(1)
audio = (np.fromstring(audio._data, dtype="int16") + 0.5) / (0x7FFF + 0.5)
spec = librosa.feature.melspectrogram(audio, SAMPLING_RATE, n_fft=FFT_SIZE,
hop_length=CHUNK_SIZE, n_mels=MEL_BANDS)
with warnings.catch_warnings():
warnings.simplefilter('ignore') # Ignore log10 zero division
spec = librosa.core.perceptual_weighting(spec, MEL_FREQS, amin=1e-5, ref_power=1e-5,
top_db=None)
spec = np.clip(spec, 0, 100)
np.save(spec_file, spec.astype('float16'), allow_pickle=False)
# Define model
logger.info('Constructing model...')
input_shape = 1, MEL_BANDS, SEGMENT_LENGTH
model = keras.models.Sequential()
model.add(Conv(80, (3, 3), kernel_regularizer=L2(0.001), kernel_initializer='he_uniform',
input_shape=input_shape))
model.add(LeakyReLU())
model.add(Pool((3, 3), (3, 3)))
model.add(Conv(160, (3, 3), kernel_regularizer=L2(0.001), kernel_initializer='he_uniform'))
model.add(LeakyReLU())
model.add(Pool((3, 3), (3, 3)))
model.add(Conv(240, (3, 3), kernel_regularizer=L2(0.001), kernel_initializer='he_uniform'))
model.add(LeakyReLU())
model.add(Pool((3, 3), (3, 3)))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(len(labels), kernel_regularizer=L2(0.001), kernel_initializer='he_uniform'))
model.add(Activation('softmax'))
optimizer = keras.optimizers.SGD(lr=0.001, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# Train model
batch_size = 100
EPOCH_MULTIPLIER = 10
epochs = 1000 // EPOCH_MULTIPLIER
epoch_size = len(meta) * EPOCH_MULTIPLIER
bpe = epoch_size // batch_size
logger.info('Training... (batch size of {} | {} batches per epoch)'.format(batch_size, bpe))
model.fit_generator(generator=iterbatches(batch_size, meta),
steps_per_epoch=bpe,
epochs=epochs)
with open('model.json', 'w') as file:
file.write(model.to_json())
model.save_weights('model.h5')
with open('model_labels.json', 'w') as file:
json.dump(le.classes_.tolist(), file)
| mit |
plissonf/scikit-learn | sklearn/decomposition/__init__.py | 147 | 1421 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
from .online_lda import LatentDirichletAllocation
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD',
'LatentDirichletAllocation']
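# Usage sketch (illustrative only, not part of the public API): the typical
# call pattern for one of the estimators exported above, reducing toy data
# to two components with PCA.
def _example_pca_usage():
    import numpy as np
    X = np.random.RandomState(0).rand(20, 5)  # toy data: 20 samples, 5 features
    pca = PCA(n_components=2)
    X2 = pca.fit_transform(X)                 # X2 has shape (20, 2)
    return X2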
| bsd-3-clause |
0x0all/kaggle-galaxies | try_convnet_cc_multirotflip_3x69r45_8433n_maxout2048_extradense.py | 7 | 17404 | import numpy as np
# import pandas as pd
import theano
import theano.tensor as T
import layers
import cc_layers
import custom
import load_data
import realtime_augmentation as ra
import time
import csv
import os
import cPickle as pickle
from datetime import datetime, timedelta
# import matplotlib.pyplot as plt
# plt.ion()
# import utils
BATCH_SIZE = 16
NUM_INPUT_FEATURES = 3
LEARNING_RATE_SCHEDULE = {
0: 0.04,
1800: 0.004,
2300: 0.0004,
}
MOMENTUM = 0.9
WEIGHT_DECAY = 0.0
CHUNK_SIZE = 10000 # 30000 # this should be a multiple of the batch size, ideally.
NUM_CHUNKS = 2500 # 3000 # 1500 # 600 # 600 # 600 # 500
VALIDATE_EVERY = 20 # 12 # 6 # 6 # 6 # 5 # validate only every VALIDATE_EVERY chunks. MUST BE A DIVISOR OF NUM_CHUNKS!!!
# else computing the analysis data does not work correctly, since it assumes that the validation set is still loaded.
NUM_CHUNKS_NONORM = 1 # train without normalisation for this many chunks, to get the weights in the right 'zone'.
# this should be only a few, just 1 hopefully suffices.
GEN_BUFFER_SIZE = 1
# # need to load the full training data anyway to extract the validation set from it.
# # alternatively we could create separate validation set files.
# DATA_TRAIN_PATH = "data/images_train_color_cropped33_singletf.npy.gz"
# DATA2_TRAIN_PATH = "data/images_train_color_8x_singletf.npy.gz"
# DATA_VALIDONLY_PATH = "data/images_validonly_color_cropped33_singletf.npy.gz"
# DATA2_VALIDONLY_PATH = "data/images_validonly_color_8x_singletf.npy.gz"
# DATA_TEST_PATH = "data/images_test_color_cropped33_singletf.npy.gz"
# DATA2_TEST_PATH = "data/images_test_color_8x_singletf.npy.gz"
TARGET_PATH = "predictions/final/try_convnet_cc_multirotflip_3x69r45_8433n_maxout2048_extradense.csv"
ANALYSIS_PATH = "analysis/final/try_convnet_cc_multirotflip_3x69r45_8433n_maxout2048_extradense.pkl"
# FEATURES_PATTERN = "features/try_convnet_chunked_ra_b3sched.%s.npy"
print "Set up data loading"
# TODO: adapt this so it loads the validation data from JPEGs and does the processing realtime
input_sizes = [(69, 69), (69, 69)]
ds_transforms = [
ra.build_ds_transform(3.0, target_size=input_sizes[0]),
ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)
]
num_input_representations = len(ds_transforms)
augmentation_params = {
'zoom_range': (1.0 / 1.3, 1.3),
'rotation_range': (0, 360),
'shear_range': (0, 0),
'translation_range': (-4, 4),
'do_flip': True,
}
augmented_data_gen = ra.realtime_augmented_data_gen(num_chunks=NUM_CHUNKS, chunk_size=CHUNK_SIZE,
augmentation_params=augmentation_params, ds_transforms=ds_transforms,
target_sizes=input_sizes)
post_augmented_data_gen = ra.post_augment_brightness_gen(augmented_data_gen, std=0.5)
train_gen = load_data.buffered_gen_mp(post_augmented_data_gen, buffer_size=GEN_BUFFER_SIZE)
y_train = np.load("data/solutions_train.npy")
train_ids = load_data.train_ids
test_ids = load_data.test_ids
# split training data into training + a small validation set
num_train = len(train_ids)
num_test = len(test_ids)
num_valid = num_train // 10 # integer division
num_train -= num_valid
y_valid = y_train[num_train:]
y_train = y_train[:num_train]
valid_ids = train_ids[num_train:]
train_ids = train_ids[:num_train]
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train + num_valid)
test_indices = np.arange(num_test)
def create_train_gen():
"""
this generates the training data in order, for postprocessing. Do not use this for actual training.
"""
data_gen_train = ra.realtime_fixed_augmented_data_gen(train_indices, 'train',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
return load_data.buffered_gen_mp(data_gen_train, buffer_size=GEN_BUFFER_SIZE)
def create_valid_gen():
data_gen_valid = ra.realtime_fixed_augmented_data_gen(valid_indices, 'train',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
return load_data.buffered_gen_mp(data_gen_valid, buffer_size=GEN_BUFFER_SIZE)
def create_test_gen():
data_gen_test = ra.realtime_fixed_augmented_data_gen(test_indices, 'test',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
return load_data.buffered_gen_mp(data_gen_test, buffer_size=GEN_BUFFER_SIZE)
print "Preprocess validation data upfront"
start_time = time.time()
xs_valid = [[] for _ in xrange(num_input_representations)]
for data, length in create_valid_gen():
for x_valid_list, x_chunk in zip(xs_valid, data):
x_valid_list.append(x_chunk[:length])
xs_valid = [np.vstack(x_valid) for x_valid in xs_valid]
xs_valid = [x_valid.transpose(0, 3, 1, 2) for x_valid in xs_valid] # move the colour dimension up
print " took %.2f seconds" % (time.time() - start_time)
print "Build model"
l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1])
l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1])
l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True)
l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r)
l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=8, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2)
l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=4, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2)
l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=128, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2)
l3s = cc_layers.ShuffleC01BToBC01Layer(l3)
j3 = layers.MultiRotMergeLayer(l3s, num_views=4) # 2) # merge convolutional parts
l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4b = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')
l4c = layers.DenseLayer(l4b, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4 = layers.FeatureMaxPoolingLayer(l4c, pool_size=2, feature_dim=1, implementation='reshape')
# l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.0, dropout=0.5, nonlinearity=custom.clip_01) # nonlinearity=layers.identity)
l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)
# l6 = layers.OutputLayer(l5, error_measure='mse')
l6 = custom.OptimisedDivGalaxyOutputLayer(l5) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.)
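# Sketch of the divisive normalisation idea used by the output layer above:
# within each group of related answers, divide by the group sum so the
# probabilities sum to one. The group boundaries are hypothetical here and the
# real layer also applies the Galaxy Zoo question weighting; see
# custom.OptimisedDivGalaxyOutputLayer for the actual constraints.
def _sketch_divisive_normalisation(raw, group_slices):
    # raw: 1D float numpy array of unnormalised outputs
    # group_slices: list of slice objects, one per question group
    out = raw.copy()
    for sl in group_slices:
        total = out[sl].sum()
        if total > 0:
            out[sl] /= total
    return out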
train_loss_nonorm = l6.error(normalisation=False)
train_loss = l6.error() # but compute and print this!
valid_loss = l6.error(dropout_active=False)
all_parameters = layers.all_parameters(l6)
all_bias_parameters = layers.all_bias_parameters(l6)
xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)]
y_shared = theano.shared(np.zeros((1,1), dtype=theano.config.floatX))
learning_rate = theano.shared(np.array(LEARNING_RATE_SCHEDULE[0], dtype=theano.config.floatX))
idx = T.lscalar('idx')
givens = {
l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l6.target_var: y_shared[idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
}
# updates = layers.gen_updates(train_loss, all_parameters, learning_rate=LEARNING_RATE, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates_nonorm = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss_nonorm, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
train_nonorm = theano.function([idx], train_loss_nonorm, givens=givens, updates=updates_nonorm)
train_norm = theano.function([idx], train_loss, givens=givens, updates=updates)
compute_loss = theano.function([idx], valid_loss, givens=givens) # dropout_active=False
compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens, on_unused_input='ignore') # not using the labels, so theano complains
compute_features = theano.function([idx], l4.output(dropout_active=False), givens=givens, on_unused_input='ignore')
print "Train model"
start_time = time.time()
prev_time = start_time
num_batches_valid = x_valid.shape[0] // BATCH_SIZE
losses_train = []
losses_valid = []
param_stds = []
for e in xrange(NUM_CHUNKS):
print "Chunk %d/%d" % (e + 1, NUM_CHUNKS)
chunk_data, chunk_length = train_gen.next()
y_chunk = chunk_data.pop() # last element is labels.
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
if e in LEARNING_RATE_SCHEDULE:
current_lr = LEARNING_RATE_SCHEDULE[e]
learning_rate.set_value(LEARNING_RATE_SCHEDULE[e])
print " setting learning rate to %.6f" % current_lr
# train without normalisation for the first # chunks.
if e >= NUM_CHUNKS_NONORM:
train = train_norm
else:
train = train_nonorm
print " load training data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
y_shared.set_value(y_chunk)
num_batches_chunk = x_chunk.shape[0] // BATCH_SIZE
# import pdb; pdb.set_trace()
print " batch SGD"
losses = []
for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
loss = train(b)
losses.append(loss)
# print " loss: %.6f" % loss
mean_train_loss = np.sqrt(np.mean(losses))
print " mean training loss (RMSE):\t\t%.6f" % mean_train_loss
losses_train.append(mean_train_loss)
# store param stds during training
param_stds.append([p.std() for p in layers.get_param_values(l6)])
if ((e + 1) % VALIDATE_EVERY) == 0:
print
print "VALIDATING"
print " load validation data onto GPU"
for x_shared, x_valid in zip(xs_shared, xs_valid):
x_shared.set_value(x_valid)
y_shared.set_value(y_valid)
print " compute losses"
losses = []
for b in xrange(num_batches_valid):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_valid)
loss = compute_loss(b)
losses.append(loss)
mean_valid_loss = np.sqrt(np.mean(losses))
print " mean validation loss (RMSE):\t\t%.6f" % mean_valid_loss
losses_valid.append(mean_valid_loss)
now = time.time()
time_since_start = now - start_time
time_since_prev = now - prev_time
prev_time = now
est_time_left = time_since_start * (float(NUM_CHUNKS - (e + 1)) / float(e + 1))
eta = datetime.now() + timedelta(seconds=est_time_left)
eta_str = eta.strftime("%c")
print " %s since start (%.2f s)" % (load_data.hms(time_since_start), time_since_prev)
print " estimated %s to go (ETA: %s)" % (load_data.hms(est_time_left), eta_str)
print
del chunk_data, xs_chunk, x_chunk, y_chunk, xs_valid, x_valid # memory cleanup
print "Compute predictions on validation set for analysis in batches"
predictions_list = []
for b in xrange(num_batches_valid):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_valid)
predictions = compute_output(b)
predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write validation set predictions to %s" % ANALYSIS_PATH
with open(ANALYSIS_PATH, 'w') as f:
pickle.dump({
'ids': valid_ids[:num_batches_valid * BATCH_SIZE], # note that we need to truncate the ids to a multiple of the batch size.
'predictions': all_predictions,
'targets': y_valid,
'mean_train_loss': mean_train_loss,
'mean_valid_loss': mean_valid_loss,
'time_since_start': time_since_start,
'losses_train': losses_train,
'losses_valid': losses_valid,
'param_values': layers.get_param_values(l6),
'param_stds': param_stds,
}, f, pickle.HIGHEST_PROTOCOL)
del predictions_list, all_predictions # memory cleanup
# print "Loading test data"
# x_test = load_data.load_gz(DATA_TEST_PATH)
# x2_test = load_data.load_gz(DATA2_TEST_PATH)
# test_ids = np.load("data/test_ids.npy")
# num_test = x_test.shape[0]
# x_test = x_test.transpose(0, 3, 1, 2) # move the colour dimension up.
# x2_test = x2_test.transpose(0, 3, 1, 2)
# create_test_gen = lambda: load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
print "Computing predictions on test data"
predictions_list = []
for e, (xs_chunk, chunk_length) in enumerate(create_test_gen()):
print "Chunk %d" % (e + 1)
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk] # move the colour dimension up.
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
    # make predictions for the test set, don't forget to cut off the zeros at the end
for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
all_predictions = all_predictions[:num_test] # truncate back to the correct length
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write predictions to %s" % TARGET_PATH
# test_ids = np.load("data/test_ids.npy")
with open(TARGET_PATH, 'wb') as csvfile:
writer = csv.writer(csvfile) # , delimiter=',', quoting=csv.QUOTE_MINIMAL)
# write header
writer.writerow(['GalaxyID', 'Class1.1', 'Class1.2', 'Class1.3', 'Class2.1', 'Class2.2', 'Class3.1', 'Class3.2', 'Class4.1', 'Class4.2', 'Class5.1', 'Class5.2', 'Class5.3', 'Class5.4', 'Class6.1', 'Class6.2', 'Class7.1', 'Class7.2', 'Class7.3', 'Class8.1', 'Class8.2', 'Class8.3', 'Class8.4', 'Class8.5', 'Class8.6', 'Class8.7', 'Class9.1', 'Class9.2', 'Class9.3', 'Class10.1', 'Class10.2', 'Class10.3', 'Class11.1', 'Class11.2', 'Class11.3', 'Class11.4', 'Class11.5', 'Class11.6'])
# write data
for k in xrange(test_ids.shape[0]):
row = [test_ids[k]] + all_predictions[k].tolist()
writer.writerow(row)
print "Gzipping..."
os.system("gzip -c %s > %s.gz" % (TARGET_PATH, TARGET_PATH))
del all_predictions, predictions_list, xs_chunk, x_chunk # memory cleanup
# # need to reload training data because it has been split and shuffled.
# # don't need to reload test data
# x_train = load_data.load_gz(DATA_TRAIN_PATH)
# x2_train = load_data.load_gz(DATA2_TRAIN_PATH)
# x_train = x_train.transpose(0, 3, 1, 2) # move the colour dimension up
# x2_train = x2_train.transpose(0, 3, 1, 2)
# train_gen_features = load_data.array_chunker_gen([x_train, x2_train], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# test_gen_features = load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# for name, gen, num in zip(['train', 'test'], [train_gen_features, test_gen_features], [x_train.shape[0], x_test.shape[0]]):
# print "Extracting feature representations for all galaxies: %s" % name
# features_list = []
# for e, (xs_chunk, chunk_length) in enumerate(gen):
# print "Chunk %d" % (e + 1)
# x_chunk, x2_chunk = xs_chunk
# x_shared.set_value(x_chunk)
# x2_shared.set_value(x2_chunk)
# num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
# # compute features for set, don't forget to cute off the zeros at the end
# for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
# features = compute_features(b)
# features_list.append(features)
# all_features = np.vstack(features_list)
# all_features = all_features[:num] # truncate back to the correct length
# features_path = FEATURES_PATTERN % name
# print " write features to %s" % features_path
# np.save(features_path, all_features)
print "Done!"
| bsd-3-clause |
MohammedWasim/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py | 254 | 2795 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
    # TASK: print the cross-validated scores for each parameter set
# explored by the grid search
print(grid_search.grid_scores_)
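    # Illustrative aside: a fitted GridSearchCV also exposes the winning
    # configuration directly, e.g.:
    #     print(grid_search.best_score_)
    #     print(grid_search.best_params_)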
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
dneuman/Weather | Weather.py | 1 | 93251 | #!/usr/bin/env python
"""
Weather Module
**************
Routines to deal with bulk weather data from Environment Canada
WxDF Class
----------
* Update - Add years to data
* Save - Save consolidated data
* GetFirstDay - returns index to first day of data
* GetLastDay - Returns index to last day of data
* GetMonths - Return data grouped by months
* GetMonth - Return data grouped by years for a single month
* GetYears - Return data grouped by years
Data Plotting
-------------
* Plot - Temperature plot with optional annotations
* RecordsPlot - Show all records on one graph
* RecordsRatioPlot - Ratio of high to low records by decade
* DayPlot - Matrix of every day colored by the type of day
* DayCountPlot - Shows how types of days change over time
* DayThreshPlot - How many days per year above or below threshold
* TemperatureCountPlot - Days in each temperature range per year
* WarmPlot - Shows how the warm season changes over time
* WarmDaysPlot - How many days in the warm season
* SnowPlot - Plots first and last snowfall
* TopPrecipPlot - Amount of precipitation on top days
* StormPlot - Amount of precipitation of top storms (over multiple days)
* MonthRange - High and Low range for a single month
Miscellaneous
-------------
* GridPlot - Put several plots on one page
* CompareSmoothing - Show how Lowess and WMA compare for trends
* CompareWeighting - Show how different weight windows compare
Requirements
------------
Requires Python 3 (tested on 3.6.1, Anaconda distribution)
"""
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import datetime as dt
import Smoothing as sm
import Annotate as at
import pathlib
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
import matplotlib.dates as mdates
import matplotlib.patheffects as path_effects
from Texture import Texture
#%precision 2
pd.options.display.float_format = '{:.1f}'.format # change print format
plt.style.use('weather')
# http://matplotlib.org/users/style_sheets.html
# matplotlib.style.available shows available styles
# matplotlib.style.library is a dictionary of available styles
# user styles can be placed in ~/.matplotlib/
#Default settings for trend lines
trendDefault = {'size':21, 'trend':'wma', 'pad':'linear', 'follow':1}
""" size : int default 21
Size of the moving average window. Larger values give smoother
results.
trend : str ['wma' | 'lowess' | 'ssa'] default 'wma'
Which smoothing algorithm to use.
pad : str ['linear' | 'mirror' | None] default 'linear'
What kind of padding to use on the trend line
follow : int default 1
Determines how closely to follow the data. Only used for
'lowess' (determines the polynomial to use) and 'ssa' (determines
how many reconstructed principles to use).
"""
class Settings():
"""Simple class to hold settings, making out of scope variables more
obvious. Use str(obj) to get list of settings, or just 'obj' at the
command line.
"""
_desc = {} # description of variables
basepath = '/Users/Dan/Documents/Weather/Stations/'
source = "Data: Environment Canada"
tlw = 4; _desc['tlw']='trend linewidth'
dlw = 1; _desc['dlw']='data linewidth'
ta = 0.99; _desc['ta']='trend alpha'
da = 0.3; _desc['da']='data alpha'
sa = 0.15; _desc['sa']='std dev alpha'
ma = 0.1; _desc['ma']='max/min alpha'
spring = [3, 4, 5]; _desc['spring']='months for spring'
summer = [6, 7, 8]; _desc['summer']='months for summer'
fall = [9, 10, 11]; _desc['fall']='months for fall'
winter = [12, 1, 2]; _desc['winter']='months for winter'
colors = {'doc':'Color from cycle to use per column',
'Max Temp (°C)':'C0', 4:'C0',
'Min Temp (°C)':'C1', 6:'C1',
'Mean Temp (°C)':'C2', 8:'C2',
'Total Rain (mm)':'C2', 14:'C2',
'Total Snow (cm)':'C1', 16:'C1',
'Total Precip (mm)':'C3', 18:'C3'} # colors to use per column
monthS = {'doc':'Return short month name',
0:'Yr ', 1:'Jan', 2:'Feb', 3:'Mar', 4:'Apr',
5:'May', 6:'Jun', 7:'Jul', 8:'Aug',
9:'Sep', 10:'Oct', 11:'Nov', 12:'Dec'}
monthL = {'doc':'Return long month name',
0:'Year', 1:'January', 2:'February', 3:'March', 4:'April',
5:'May', 6:'June', 7:'July', 8:'August',
9:'September', 10:'October', 11:'November', 12:'December'}
monthN = {'doc':'Return index for supplied long or short month name',
'Jan':1, 'Feb':2, 'Mar':3, 'Apr':4,
'May':5, 'Jun':6, 'Jul':7, 'Aug':8,
'Sep':9, 'Oct':10, 'Nov':11, 'Dec':12,
'January':1, 'February':2, 'March':3, 'April':4,
'May':5, 'June':6, 'July':7, 'August':8,
'September':9, 'October':10, 'November':11, 'December':12}
def __repr__(self):
# Return list of variables and their descriptions in printable format
s = "\nWeather Module Settings:\n\n"
for key, value in Settings.__dict__.items():
if type(key) != str:
continue
if key.startswith('_'):
continue
if type(value)==dict and 'doc' in value:
s += '{:<8} dict: {}\n'.format((key+':'), value['doc'])
else:
s += '{:<8} {:<12}{}\n'.format((key+':'),
repr(value),
self._desc.get(key,''))
return s
st = Settings() # st contains system settings
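# Usage sketch: the module-level settings object above is consulted throughout
# this file, e.g. st.colors['Max Temp (°C)'] gives the plot colour for that
# column and st.monthN['Jan'] gives 1; individual values can be overridden
# before plotting (for example, st.tlw = 2 for thinner trend lines).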
class WxDF(pd.DataFrame):
"""Weather data management class
Requires a table of cities (at least one) in the same directory as the
calling program. The table must be a CSV file named 'Cities.csv' with the
following headings:
city, station, path
Examples
--------
::
wf = WxDF() # returns first city in _cf list
wf = WxDF(3) # returns city in _cf.iloc[3]
wf = WxDF(3, df) # returns WxDF from df, with attributes from _cf
wf = WxDF(df) # returns WxDF from df, with attributes empty
Typing `wf` by itself in iPython will print a summary of the data.
"""
# Specify column locations here. These should be used by routines
# The column names can be determined by the df.columns list
temps = [4, 6, 8] # max, min, average temps
precips = [14, 16, 18] # rain, snow, precipitation
(tmx, tmn, tav) = temps
(rn, sn, pr) = precips
wind = 24
wdir = 22
qual = 3 # data quality column
_nonHeadRows = 25
_dataTypes = { #0: np.datetime64, # "Date/Time" (not used as it is index)
1: int, # "Year"
2: int, # "Month",
3: int, # "Day",
4: str, # "Data Quality",
5: float, # "Max Temp (°C)",
6: str, # "Max Temp Flag",
7: float, # "Min Temp (°C)",
8: str, # "Min Temp Flag",
9: float, # "Mean Temp (°C)",
10: str, # "Mean Temp Flag",
11: float, # "Heat Deg Days (°C)",
12: str, # "Heat Deg Days Flag",
13: float, # "Cool Deg Days (°C)",
14: str, # "Cool Deg Days Flag",
15: float, # "Total Rain (mm)",
16: str, # "Total Rain Flag",
17: float, # "Total Snow (cm)",
18: str, # "Total Snow Flag",
19: float, # "Total Precip (mm)",
20: str, # "Total Precip Flag",
21: float, # "Snow on Grnd (cm)",
22: str, # "Snow on Grnd Flag",
23: float, # "Dir of Max Gust (10s deg)",
24: str, # "Dir of Max Gust Flag",
25: float, # "Spd of Max Gust (km/h)",
26: str # "Spd of Max Gust Flag"
}
_metadata = ['city','period','type','path','station','id','baseline']
_cf = None # city information
def __init__(self, *args, **kwargs):
"""Initialize WxDF object
Parameters
----------
id : int (optional, default 0)
Row in the Cities.csv table to use to initialize data
df : DataFrame (optional)
Use this dataframe if provided, otherwise load data from disk
using the path found in Cities.csv
"""
if WxDF._cf is None:
WxDF._cf = pd.read_csv('Cities.csv',
header=0,
skipinitialspace=True,
index_col=False)
if len(args) == 0 or type(args[0]) == int:
if len(args) == 0: id = 0
else: id = args[0]
if len(args) == 2 and type(args[1]) == pd.DataFrame:
df = args[1]
else:
df = pd.read_csv(st.basepath+WxDF._cf.path[id],
index_col=0,
header=0,
dtype=WxDF._dataTypes,
parse_dates=True)
super(WxDF, self).__init__(df)
self.city = WxDF._cf.city[id]
self.path = WxDF._cf.path[id]
self.station = WxDF._cf.station[id]
self.id = id
self.type = 'daily'
self.period = 'daily'
else:
super(WxDF, self).__init__(*args, **kwargs)
self.city = ''
self.type = ''
self.path = ''
self.period = ''
# Add a minimum 10-year baseline. Try period up to 1920, otherwise
# try 1961-1990, otherwise 10 years from beginning, otherwise,
# just first year.
# Pre-1920 is the pre-industrial period. 1961-1990 highlights change
# since around 1975 and is commonly used in literature.
gap = 10
if type(self.index[0]) == pd.Timestamp:
fy = self.index[0].year
ly = self.index[-1].year
else:
fy = self.index[0]
ly = self.index[-1]
if fy <= 1920 - gap:
bls = fy
ble = 1920
elif fy <= 1990 - gap:
bls = max([1961, fy])
ble = 1990
elif ly - fy >= gap-1:
bls = fy
ble = bls + gap-1
else:
bls = ble = fy
self.baseline = [bls, ble]
return
@property
def _constructor(self):
return WxDF
def __str__(self):
"""Return a formatted summary of the data
"""
labels = ['Qual', 'Tmax', 'Tmin', 'Tavg', 'Rain', 'Snow', 'Prec']
names = ['Data Quality', 'Max Temp (°C)', 'Min Temp (°C)',
'Mean Temp (°C)', 'Total Rain (mm)', 'Total Snow (cm)',
'Total Precip (mm)']
cols = [self.qual, self.tmx, self.tmn,
self.tav, self.rn, self.sn, self.pr]
# make map of column name to short label
hMap = dict(zip(names, labels))
mincw = 7 # minimum column width
def GetLine(r):
"""Format row r into a string
"""
# hdgs and lbl are defined outside function
if hasattr(self.index[0], 'year'):
st = str(self.index[r].date()).ljust(10)
else:
st = str(self.index[r]).ljust(10)
for i, c in enumerate(lbl):
num = max(mincw, len(hdgs[i]))
if type(self[c].iloc[r]) is np.float64:
st = st + '{:.1f}'.format(self[c].iloc[r]).rjust(num)
else:
st = st + str(self[c].iloc[r]).rjust(num)
return st
# Set up the headings to use. The period attribute will determine
# what to use. If period does not exist, just show first 4 columns.
hdgs = ' Undefined'
if not hasattr(self, 'period'):
num = min(4, len(self.columns))
lbl = list(self.columns[0:num])
hdgs = [l.rjust(max(mincw,len(l))) for l in lbl]
elif self.period == 'daily':
full = list(hMap.keys())
avail = list(self.columns)
lbl = []
for h in full:
if h in avail:
lbl.append(h)
hdgs = [hMap[h] for h in lbl]
hdgs = [h.rjust(max(mincw,len(h))) for h in hdgs]
elif self.period == 'monthly':
cols = [0, 4, 8, 11] # months to show
lbl = list(self.columns[cols])
hdgs = [l.rjust(max(mincw,len(l))) for l in lbl]
elif self.period == 'annual':
lbl = list(self.columns)
hdgs = [l.rjust(max(mincw,len(l))) for l in lbl]
first = self.GetFirstDay()
last = self.GetLastDay()
# create heading
s = ''
if hasattr(self, 'city') and self.city is not None:
s = "City: {0} Type: {1}\n".format(self.city, self.type)
s = s + "Date " + "".join(hdgs)
# add beginning lines
if first > 0:
s = '\n'.join([s, GetLine(0)])
if first > 2:
s = '\n'.join([s, '...'])
if first > 1:
s = '\n'.join([s, GetLine(first-1)])
if (last-first)<32:
for i in range(first, last+1):
s = '\n'.join([s, GetLine(i)])
else:
for i in range(first, first+5):
s = '\n'.join([s, GetLine(i)])
s = '\n'.join([s,'...'])
num = min(len(self.index), last+2)
for i in range(last-9, num):
s = '\n'.join([s, GetLine(i)])
s = '\n'.join([s, '[{}r x {}c]'.format(len(self.index),
len(self.columns))])
if hasattr(self, 'city'):
if hasattr(self.index[first], 'year'):
years = self.index[last].year - self.index[first].year
else:
years = self.index[last] - self.index[first]
s = s + ' Years: ' + str(years+1)
return s
def Str(self):
        print(self.__str__())
def _GetData(self, year=None, raw=False):
"""Get a year's worth of data from Environment Canada site.
Parameters
----------
year : int opt default current year
Year to retrieve. Defaults to current year.
raw : boolean opt default False
If True, do no explicit conversion of supplied data. Use this
to help debug.
"""
if year is None:
year = dt.date.today().year
baseURL = ("http://climate.weather.gc.ca/climate_data/bulk_data_e.html?"
"format=csv&stationID={stn}&Year={yr}&timeframe=2"
"&submit=Download+Data")
url = baseURL.format(stn=self.station,
yr=year)
if raw:
df = pd.read_csv(url, skiprows=self._nonHeadRows,
index_col=0,
parse_dates=True,
na_values=['M','<31'])
return df
df = pd.read_csv(url, skiprows=self._nonHeadRows,
index_col=0,
parse_dates=True,
dtype=self._dataTypes,
na_values=['M','<31'])
return WxDF(self.id, df)
    def Update(self, sYear=None, eYear=None):
        """Merge desired years from the online database.
Parameters
----------
sYear : int opt
Start year, defaults to year of last data point.
eYear : int opt
End year, defaults to current year.
"""
def Combine(orig, new):
new.dropna(thresh=5, inplace=True)
for row in new.index:
orig.loc[row] = new.loc[row]
if (eYear is None):
eYear = dt.date.today().year
if (sYear is None):
sYear = self.index[self.GetLastDay()].year
for theYear in range(sYear, eYear+1):
nf = self._GetData(theYear)
Combine(self, nf)
def Save(self):
"""Save consolidated weather data into a .csv file. Directories are
created as required.
"""
file = "".join([st.basepath, self.path])
p = pathlib.Path(file)
p.parent.mkdir(parents=True, exist_ok=True)
self.to_csv(file,
float_format="% .1f")
def GetBaseAvg(self, col=None, range=None):
"""Get the average value over the baseline period for a column.
Parameters
----------
col : int or str default None
Column to get average for. Can be the column number, or its name.
If None, all columns are returned.
range : list of ints opt default None
Optional range to compute over if not standard baseline. Must be
a list with the start and end years (inclusive), eg
``range = [1975, 1990]``
"""
if col and type(col) is int:
col = self.columns[col]
if not range:
bls = self.baseline[0]
ble = self.baseline[1]
else:
bls = range[0]
ble = range[1]
if col:
return self.loc[bls:ble, col].mean()
else:
return self.loc[bls:ble].mean()
def GetFirstDay(self):
"""Return index to first day with valid data.
Returns
-------
Integer: df.iloc[i] will give data on first day.
"""
col = min(4, len(self.columns)-1)
i=-1
for i in range(len(self.index)):
if not np.isnan(self.iat[i,col]): break
return i
def GetLastDay(self):
"""Return index to last day with valid data.
Returns
-------
Integer: df.iloc[i] will give data on last day.
"""
col = min(4, len(self.columns)-1)
i = len(self.index)-1
for i in range(len(self.index)-1,-1,-1):
if not np.isnan(self.iat[i,col]): break
return i
def GetMonths(self, col, func=np.mean):
"""Convert daily data to monthly data
Parameters
----------
col : int
Column to be combined.
func : function opt default np.mean
Function to use for combining. np.min, np.max and np.sum are
also useful.
Returns
-------
        Returns a WxDF with the grouped monthly data, one month per
        column.
Note
----
Only works for 1 column at a time due to extra complexity of multi-level
axes when months are already columns.
"""
label = self.columns[col]
avgs = self.pivot_table(values=[label],
index=['Year'],
columns=['Month'],
aggfunc=func)
avgs = avgs[label] # turn into simple dataframe for simplicity
        colnames = {i: st.monthS[i] for i in range(1, 13)}
        avgs.rename(columns=colnames, inplace=True)
mf = WxDF(self.id, avgs)
mf.period = 'monthly'
mf.type = func.__name__ + ' ' + label
return mf
def GetMonth(self, cols=[4, 6], month=None, func=np.mean):
"""Convert daily data to yearly data for a particular month
Parameters
----------
cols : list opt default [4, 6] (max, min temps)
List of columns to be combined
month : int opt
Month to combine data for. Defaults to current month
func : function opt default np.mean
Function that combines the data. np.min, np.max and np.sum are
also useful.
Returns
-------
        WxDF containing monthly data by year
"""
if month == 0: # Return full year
return self.GetYears(cols=cols, func=func)
if month is None:
month = dt.date.today().month
labels = list(self.columns[cols])
yf = self.loc[lambda df: df.Month == month]
mf = yf.pivot_table(values=labels,
index=['Year'],
aggfunc=func)
mf = WxDF(self.id, mf[labels])
mf.period = 'annual'
mf.type = func.__name__.title() + ' for ' + st.monthL[month]
# Columns possibly in wrong order, so make sure they're ordered
# as given.
return mf
def GetYears(self, cols=[4, 6, 8], func=np.mean):
"""Convert daily data to yearly data.
cols : list of ints opt default [4, 6, 8] (max, min, average temps)
Columns to be combined. Defaults to min, max, avg temps
func : function opt default np.mean
Function to use for combining. np.min, np.max and np.sum are
also useful.
Returns
-------
        WxDF with the data grouped by year
"""
labels = self.columns[cols]
yf = self.pivot_table(values=list(labels),
index=['Year'],
aggfunc=func)
# Drop last year if incomplete
        date = self.index[self.GetLastDay()]
if date.dayofyear < 365:
yf.drop(date.year, inplace=True)
yf = WxDF(self.id, yf[labels])
yf.period = 'annual'
yf.type = func.__name__.title() + ' for Year'
return yf
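# Usage sketch (illustrative only): how the WxDF class above is typically
# driven from a script or interactive session. Assumes the Cities.csv table
# and saved station data described in the class docstring are in place.
def _example_wxdf_usage():
    wf = WxDF(0)               # first city listed in Cities.csv
    wf.Update()                # fetch any missing recent years from the web
    wf.Save()                  # write the consolidated .csv back to disk
    yf = wf.GetYears()         # annual means of max/min/mean temperature
    mf = wf.GetMonth(month=7)  # July values by year
    return yf, mf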
def _AddEOY(df, col, offset=0, ax=None, legend=True, onlymean=True,
func=np.mean):
"""Make an estimate of the mean temperature for the last year in data.
Parameters
----------
df : WxDF or pd.DataFrame
Data to be analyzed. Expects columns to be in WxDF format.
col : int
Column to use for calculation
offset : int default 0
Offset to use, eg for when plotting against a baseline. This is
subtracted from the actual value.
ax : matplotlib.Axis or None, default None
Axis to plot on. Will not plot if None. If provided, will plot a box
plot showing the mean, 2-sigma (95%), min and max values.
legend : bool default True
Flag to include labels in legend.
onlymean : bool default True
Only add mean to graph
func : function default np.mean
function to use for aggregating annual data. Use np.mean for
temperatures, and np.sum for precipitation.
Returns
-------
mean : float
Estimated final mean temperature for final year
sigma : float
Standard deviation from all previous years for the rest of the year.
max : float
Maximum seen deviation from temperature to present day of year.
min : float
Minimum seen deviation from temperature to present day of year.
Note
----
Return values have the offset subtracted from them.
"""
if type(col) == str:
tcol = col
else:
tcol = df.columns[col]
tcols = df.columns[[0,4,6,8,14,16,18]]
date = df.index[df.GetLastDay()]
yr = date.year
dy = date.dayofyear
# get days in full year
fy = pd.Timestamp(dt.date(yr, 12, 31)).dayofyear
df = df.loc[:,tcols] # only use useful columns
df['dy'] = df.index.dayofyear
# For all previous years, get days up to and including last day,
# and days afterwards. Then get the sum for each year.
bf = df.loc[df.dy <= dy] # beginning
ef = df.loc[df.dy > dy] # end
yf = bf.groupby('Year').aggregate(func)
yf['end'] = ef[['Year',tcol]].groupby('Year').aggregate(func)
# The difference between beginning of year average temperature should be
# correlated with the end of year temp, so calculate this for every year,
# then get the stats for the differences to determine how the end of the
# last year should end up.
yf['diff'] = yf['end'] - yf[tcol]
# Get results weighted by amount of year left
bw = dy/fy # beginning weight
ew = 1.0 - bw # end weight
yb = yf.loc[yr, tcol] # beginning temp
yAvg = yb * bw + (yb + yf['diff'].mean()) * ew - offset
yMax = yb * bw + (yb + yf['diff'].max()) * ew - offset
yMin = yb * bw + (yb + yf['diff'].min()) * ew - offset
yStd = yf['diff'].std(ddof=0) * ew
if ax:
ys = str(yr)
ps = ms = es = ''
if legend:
ps = ys+' (est)'
ms = ys+' Min/Max'
es = ys+' 95% range'
ax.plot(yr, yAvg, 'ko', label=ps)
if not onlymean:
ax.plot([yr, yr], [yMax, yMin], 'k_-', lw=1., label=ms, ms=7,
alpha=0.8)
ax.plot([yr, yr], [yAvg+2*yStd, yAvg-2*yStd], '-', color='orange',
alpha=0.5, lw=7, label=es)
return yAvg, yStd, yMax, yMin
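# Worked example of the year-end weighting above (numbers are made up): with
# 80% of the year elapsed (bw=0.8, ew=0.2), a year-to-date mean of 10°C and a
# historical mean beginning-to-end difference of -2°C, the estimate is
# 10*0.8 + (10 - 2)*0.2 = 9.6°C, before any baseline offset is subtracted.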
def _GetTrendArgs(sTrend=trendDefault, **kwargs):
"""Utility to return a new dict with updated trend keyword values
Parameters
----------
sTrend : dict default trendDefault
starting dictionary of trend keywords to be updated
**kwargs : keywords and values with which to update the trend dict
Must supply '**kwargs', not 'kwargs', or else kwargs will replace
sTrend.
Returns
-------
newTrend : updated dictionary of trend keywords and values.
"""
newTrend = sTrend.copy()
newTrend.update(kwargs)
return newTrend
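# Usage sketch: override one or two smoothing keywords while keeping the rest
# of trendDefault, e.g. _GetTrendArgs(size=31, trend='lowess') returns
# {'size': 31, 'trend': 'lowess', 'pad': 'linear', 'follow': 1}.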
def Plot(df, rawcols=None, trendcols=None, ratecols=None,
func=None, change=True, est=True, **kwargs):
"""Plot indicated columns of data, including the moving average.
Parameters
----------
df : WxDF
DataFrame containing daily data. Can be a pandas.DataFrame with a
.city attribute added.
rawcols : list of ints default [8] (Mean Temp)
Columns to plot as raw data.
trendcols : list of ints default [8] (Mean Temp)
Columns to plot as trend lines.
ratecols : list of ints default [8] (Mean Temp)
Columns to add rate lines to.
func : function default np.mean
Function used to aggregate the annual data. Use np.sum
for precipitation.
change : bool default True
Flag determines if change from baseline desired.
est : bool default True
Include current incomplete year as an estimate.
kwargs : **dict
keywords to pass to the smoothing function that override the default
values. See Smoothing.Smooth or the trendDefault definition for
explanation.
Notes
-----
If only rawcols are provided, trend and rate lines will be added. Use
empty lists if these aren't desired.
    It is possible to use both temperature and precipitation columns, but
    this will generally not be useful, and the labels will be incorrect.
"""
trend = _GetTrendArgs(**kwargs)
# get a list of all desired columns (check for None)
allcols = set()
for s in [rawcols, trendcols, ratecols]:
if s: allcols = allcols.union(set(s))
allcols = list(allcols)
# set up defaults. If first supplied columns is temperature, make
# defaults temperature, otherwise precipitation.
if len(allcols)==0 or (allcols[0] in df.temps):
unitstr = ' (°C)'
typestr = 'Temperature'
ratestr = '{:.2f}°C/decade'
if not func: func = np.mean
if not rawcols: rawcols = [df.tav]
if not trendcols: trendcols = rawcols
if not ratecols: ratecols = trendcols
else:
unitstr = ' (mm/cm)'
typestr = 'Precipitation'
ratestr = '{:.1f}/decade'
if not func: func = np.sum
if not rawcols: rawcols = [df.pr] # total precipitation
if not trendcols: trendcols = rawcols
if not ratecols: ratecols = trendcols
allcols = list(set().union(set(rawcols), set(trendcols), set(ratecols)))
yf = df.GetYears(cols=allcols, func=func)
offset = yf.GetBaseAvg() # offset is used later
if change:
yf = yf - offset
ychstr = ' Change From Baseline'
chstr = 'Change in '
else:
offset[:] = 0
ychstr = ''
chstr = ''
cols = yf.columns
fig = plt.figure(df.city+'_Plot')
fig.clear() # May have been used before
ax = fig.add_subplot(111)
# Create legend entries manually
handles = []
if len(rawcols) > 0:
line = mlines.Line2D([], [], color='k', marker='o',
alpha=st.da, lw=st.dlw, label='Raw Data')
handles.append(line)
if len(trendcols) > 0:
line = mlines.Line2D([], [], color='k', alpha=st.ta, lw=st.tlw,
label=trend['trend'].upper()+' Trend')
handles.append(line)
for col in cols:
s = yf[col]
if est:
r = _AddEOY(df, col, offset[col], func=func)
s[s.index[-1]+1] = r[0] # add current year estimate
c = st.colors[col]
# add a legend entry for this color
line = mpatches.Patch(color=c, label=col)
handles.append(line)
a = sm.Smooth(s, trend)
if col in df.columns[rawcols]:
ax.plot(s, 'o-', alpha=st.da, lw=st.dlw, color=c)
if col in df.columns[trendcols]:
ax.plot(a, '-', alpha=st.ta, lw=st.tlw, color=c)
if col in df.columns[ratecols]:
# fit line to recent data
# Use smoothed line for rate since different methods may reduce
# influence of outliers.
at.AddRate(a.loc[1970:], label=ratestr)
# Label chart
plt.ylabel(typestr + ychstr + unitstr)
plt.title(chstr + df.city + "'s Annual " + typestr)
# Annotate chart
if change:
at.Baseline(df.baseline)
at.Attribute(source=st.source, ha='left')
plt.legend(handles=handles, loc=2)
at.AddYAxis(ax)
fig.show()
return
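# Usage sketch (illustrative only): an annual temperature plot for the default
# city with the standard trend, then a precipitation version that sums the
# daily values and uses a wider smoothing window.
def _example_plot_usage():
    wf = WxDF(0)
    Plot(wf)                                  # mean temperature, default trend
    Plot(wf, rawcols=[wf.pr], func=np.sum,    # total precipitation
         size=31)                             # wider smoothing window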
def RecordsPlot(df, use=[0,1,2,3,4,5], stack=False):
"""Plot all records in daily data.
Parameters
----------
df : WxDF
object containing daily data for a location. Can use a
pandas.DataFrame if df.city comtains the name of the city.
use : list of int opt default [0,1,2,3,4,5]
List of records to show. They are any of
0. Max Day,
1. Min Day,
2. Max Night
3. Min Night
4. Rain
5. Snow
stack : boolean default False
Show the record counts for each year in a separate stackplot.
"""
start = 1960 # start date for x-axis
# set up data for each set of records:
# [Name, df column, mark color and format, zorder]
props = [
['Max Day', df.tmx, '^', 'C0', 6, float.__gt__, -100.0],
['Min Day', df.tmx, 'v', 'C3', 5, float.__lt__, 100.0],
['Max Night', df.tmn, '^', 'C5', 4, float.__gt__, -100.0],
['Min Night', df.tmn, 'v', 'C1', 3, float.__lt__, 100.0],
['Rain', df.rn, 'o', 'C2', 2, float.__gt__, -100.0],
['Snow', df.sn, 'H', 'C5', 1, float.__gt__, -100.0],
]
props = [props[i] for i in use]
columns = [p[0] for p in props]
fig = plt.figure(df.city+'_Records')
fig.clear()
ax = fig.add_subplot(111)
plt.subplots_adjust(bottom=0.1)
# Set up axis formatting
at.MonthFmt(ax)
# ticks must be set before the first plot, or they will be locked in
ax.set_ylim((dt.date(2015,12,16), dt.date(2017,1,14)))
ax.set_xticks(np.arange(start, 2021, 5))
ax.set_xlim((start-2, 2022))
# will look up records by month/day (1-indexed)
r = np.zeros((13, 32), dtype=float)
yrmin = df.index[0].year
yrmax = df.index[-1].year
counts = np.zeros((len(use), (yrmax-yrmin+1)), dtype=int)
for pi, p in enumerate(props):
print(p[0])
# contains running list of records. x is the year, y is the date.
x = []
y = []
# choose appropriate comparison function. The 'Min' records are '<'
compare = p[5]
r[:,:] = p[6]
for i in range(len(df.index)):
s = df.iat[i, p[1]]
month = df.iat[i, 1]
day = df.iat[i, 2]
if compare(s, r[month,day]):
r[month,day] = s
x.append(df.index[i].year)
y.append(df.index[i].replace(year=2016))
counts[pi, df.iat[i,0]-yrmin] += 1
# drop any dates before 1960
for i in range(len(x)):
if x[i] >= start:
break
x = x[i:]
y = y[i:]
ax.plot(x, y,
marker=p[2], color=p[3], ls='',
label=p[0], zorder=p[4])
# annotate axes
ax.legend(loc='upper left', ncol=6,
bbox_to_anchor=(0, -0.04), handlelength=0.8)
plt.title('Daily Weather Records for ' + df.city)
# Add second y-axis
at.AddYAxis(ax, month=True)
plt.show()
if not stack: return
# Plot number of records per year in a stacked bar chart
x = list(range(yrmin, yrmax+1))
fig = plt.figure(df.city+'_Records_Bar')
fig.clear()
ax = fig.add_subplot(111)
plt.axis([start, 2020, 0, 45])
ax.set_xticks(np.arange(start, 2021, 5))
ax.set_yticks(np.arange(0, 46, 5))
plt.stackplot(x, counts, alpha=0.7)
plt.legend(columns)
plt.title('Records per Year for ' + df.city)
plt.show()
print('Done')
return
def RecordsRatioPlot(df):
"""Find ratio of warm records to cold records for each year
"""
grp = 10 # number of years to group together
yr = 'Year'
# create dataframe to hold yearly count of records
cf = pd.DataFrame(index=np.arange(df.index[0].year,
df.index[-1].year+1, grp))
# will look up records by day of year (1-indexed)
cr = np.zeros(367, dtype=float)
for col, ct in zip(df.temps[:2], ['D', 'N']):
for t, comp, lim in zip(['H', 'L'],
[float.__gt__, float.__lt__],
[-1000, 1000]):
r = [] # list of record days
cr[:] = lim
print(ct+t)
for i in range(len(df.index)):
d = df.index[i]
val = df.iat[i, col]
if comp(val, cr[d.dayofyear]):
cr[d.dayofyear] = val
# add date, grouped year, and value to list
r.append([d, np.floor(d.year/grp)*grp, val])
cn = df.columns[col]
rf = pd.DataFrame(r, columns=['Date', yr, cn])
cf[ct+t] = rf[[yr, cn]].groupby(yr).count()
cf['H'] = cf['DH'].add(cf['NH'], fill_value=0)
cf['L'] = cf['DL'].add(cf['NL'], fill_value=0)
cf['H/L'] = cf['H']/cf['L']
cf['DH/NL'] = cf['DH']/cf['NL']
fig = plt.figure(df.city+'_Records_Ratio')
fig.clear()
ax = fig.add_subplot(111)
xticks = list(cf.index)
xlabels = ["{}s".format(s) for s in xticks]
ax.set_xticks(xticks)
ax.set_xticklabels(xlabels, rotation=45, ha='right')
ax.plot(cf['H/L'], 'o-', color='C0', label='',
lw=st.tlw, alpha=st.ta)
ax.set_title('Ratio of Record Highs to Lows (Day + Night)\n'
'for ' + df.city + ', Grouped by Decade')
ax.set_ylabel('Ratio of Record Highs to Lows')
ax.axhline(1, ls='--', color='k', lw=1)
at.Attribute(ax, ha='left', va='top', source=st.source)
at.AddYAxis(ax)
plt.show()
return cf
def DayPlot(df, start=1940, use = [0,1,2,3,4,5,6,7]):
"""Go through all data and plot what the weather was like for each day.
Parameters
----------
df : WxDF
object containing daily data for a location. Can use a
        pandas.DataFrame if df.city contains the name of the city.
start : int default 1940
Year to start the plot.
    use : list of int default [0,1,2,3,4,5,6,7]
Data to plot.
Note
----
Colours are determined by the stylesheet
"""
fig = plt.figure(df.city+'_Days')
fig.clear()
ax = fig.add_subplot(111)
# Set up axis formatting
at.MonthFmt(ax)
# ticks must be set before the first plot, or they will be locked in
# Adds space, and assumes data is for 2016.
ax.set_ylim((dt.date(2015,12,16), dt.date(2017,1,14)))
if start >= 1940:
ax.set_xticks(np.arange(start, 2021, 5))
else:
ax.set_xticks(np.arange(start, 2021, 10))
ax.set_xlim((start-2, 2022))
    # Name, Range text, Lower Limit, Upper Limit, Column, Color
props = [['Snow', '', 0, 0, df.sn, 'C7'],
['Rain', '', 0, 0, df.rn, 'C2'],
['Frigid', '(< -15°C)', -100, -15, df.tmx, 'C1'],
['Freezing', '(-15–0)', -15, 0, df.tmx, 'C5'],
['Cold', '(0–15)', 0, 15, df.tmx, 'C4'],
['Cool', '(15–23)', 15, 23, df.tmx, 'C3'],
['Warm', '(23–30)', 23, 30, df.tmx, 'C0'],
['Hot', '(≥30)', 30, 100, df.tmx, 'C6']]
props = [props[i] for i in use]
# Make a new dataframe starting at the desired location, and make
# a column with the correct date, but year as 2016, for plotting
d = dt.datetime(start,1,1)
ix = df.index.get_loc(d)
tf = df.iloc[ix:,:].copy()
tf['Now'] = tf.index
tf['Now'] = tf['Now'].apply(lambda t: t.replace(year=2016))
# make a separate frames for wet and dry days
cn = tf.columns[4] # dry column name (Max Temp)
# get main column names (-1 is last column, i.e. 'Now')
mcol = tf.columns[[df.tmx] + df.precips + [-1]]
precip = tf.columns[df.pr]
tf.loc[np.isnan(tf[precip]), precip] = 0 # set rows without data to dry
dryf = tf.loc[tf[precip]==0, mcol]
wetf = tf.loc[tf[precip]>0, mcol]
# Just select the rows that meet the criteria, then plot that row's
# Year vs Now (Now being the date moved to 2016).
handles = [] # create legend manually
for name, r, ll, ul, col, c in props:
cn = tf.columns[col]
if col in df.precips:
sf = wetf.loc[wetf[cn]>0]
else:
sf = dryf.loc[dryf[cn]>=ll]
sf = sf.loc[sf[cn]<ul]
ax.plot(sf.index.year, sf.Now, '_', color=c, alpha=1.0, markersize=4,
label='')
line = mpatches.Patch(color=c, label=' '.join([name,r]))
handles.append(line)
# Annotate chart
plt.title('Precipitation (Rain, Snow) or \n'
'Dry Days (by Daily High Temperature Range) in '+ df.city)
ax.legend(loc='upper left', ncol=4, markerscale=3, handles=handles,
bbox_to_anchor=(0, -0.04), handlelength=0.8, fontsize='small')
at.Attribute(va='below', source=st.source)
# Add second y-axis
at.AddYAxis(ax, month=True)
plt.show()
return
def DayCountPlot(df, use = [0,1,2,3,4,5,6,7], column=None, style='fill',
trendonly=False, **kwargs):
"""Go through all data and plot what the weather was like for each day.
Parameters
----------
df : WxDF
object containing daily data for a location. Can use a
        pandas.DataFrame if df.city contains the name of the city.
use : list of int default [0,1,2,3,4,5,6,7]
Data to plot.
column : int [4 | 6 | 8] default 4
Which data column to use, (high, low, mean).
style : ['fill' | 'stack' | 'line'] default 'fill'
Style of plot to make. 'fill' fills the line to the baseline. 'stack'
makes a stack plot where the areas add to 100%. 'line' has no fill
and just shows the data.
trendonly : boolean default False
True if only the trend line is needed. The style keyword determines
how it will look.
kwargs : **dict
keywords to pass to the smoothing function that override the default
values. Supply 'trend=None' or 'trend=False' if no trend line desired.
See Smoothing.Smooth or the trendDefault definition for explanation.
Note
----
Colours are determined by the stylesheet
"""
trend = _GetTrendArgs(**kwargs)
useTrend = bool(trend['trend'])
if not column: column = df.tmx # set default value
tfilt = Texture('noise', block=2, light=True)
tshadow = Texture('shadow', pad=.2, alpha=0.4, cut_figure=False)
fig = plt.figure(df.city+'_DayCount')
fig.clear()
ax = fig.add_subplot(111)
sFill = sLine = sStack = False
if style == 'line': sLine = True
elif style == 'stack': sStack = True
else: sFill = True
    # Name, Range text, Lower Limit, Upper Limit, Column, Color
props = [['Snow', '', 0, 0, df.sn, 'C7'],
['Rain', '', 0, 0, df.rn, 'C2'],
['Frigid', '(< -15°C)', -100, -15, df.tmx, 'C1'],
['Freezing', '(-15–0)', -15, 0, df.tmx, 'C5'],
['Cold', '(0–15)', 0, 15, df.tmx, 'C4'],
['Cool', '(15–20)', 15, 20, df.tmx, 'C3'],
['Warm', '(20–30)', 20, 30, df.tmx, 'C0'],
['Hot', '(≥30)', 30, 100, df.tmx, 'C6']]
ct = {df.tmx: 'High',
df.tmn: 'Low',
df.tav: 'Mean'}
props = [props[i] for i in use]
cmap = {} # colour map
tmap = {} # text values (label and range)
[cmap.update({p[0]:p[5]}) for p in props]
[tmap.update({p[0]:' '.join([p[0], p[1]])}) for p in props]
# make a separate frames for wet and dry days
cn = df.columns[column] # dry column name (Max Temp)
precip = list(df.columns[df.precips])
dryf = df.loc[df[precip[-1]]==0, [cn]]
wetf = df.loc[df[precip[-1]]>0, precip]
if sStack:
# For days with both rain and snow, zero the lesser amount. This makes
# stack total closer to 365.
rain = df.columns[df.rn]
snow = df.columns[df.sn]
wetf.loc[wetf[rain]>=wetf[snow], snow] = 0
wetf.loc[wetf[rain]< wetf[snow], rain] = 0
x = list(range(df.index[0].year, df.index[-1].year+1))
data = pd.DataFrame(index=x, dtype=int)
colors = []
labels = []
for name, r, ll, ul, col, c in props:
if col == df.tmx:
# use provided column, not the column in props
cn = df.columns[column]
else:
cn = df.columns[col]
if col in df.precips:
sf = wetf.loc[wetf[cn]>0, [cn]]
else:
sf = dryf.loc[dryf[cn]>=ll, [cn]]
sf = sf.loc[sf[cn]<ul]
gr = sf.groupby(sf.index.year).count()
data[name] = gr[cn]
data.loc[np.isnan(data[name]), name] = 0
colors.append(c)
labels.append(' '.join([name,r]))
if useTrend or trendonly:
if not trend: trend='wma' # set default trend type if not given
tf = pd.DataFrame(index=data.index)
for t in data:
tf[t] = sm.Smooth(data[t], trend)
if trendonly:
# Use the trend data instead of actual data
useTrend = False
data = tf
# Create legend entries manually
handles = []
def AddLegend(c, t):
# add a legend entry for this color
line = mpatches.Patch(color=c, label=t)
handles.append(line)
sums = data.sum() # sort by total area
sums.sort_values(inplace=True, ascending=False)
plotOrd = list(sums.index)
if sFill:
# Get plot order
fa = 0.75
# if trend:
# fa = 0.15
for p in plotOrd:
ax.fill_between(data.index, data[p].values,
color=cmap[p], alpha=fa, label='',
agg_filter=tfilt)
ax.plot(data.index, data[p].values, color='k', lw=.5,
agg_filter=tshadow)
AddLegend(cmap[p], tmap[p])
if useTrend:
for p in plotOrd:
ax.plot(tf.index, tf[p].values, lw=3.0,
color=cmap[p], alpha=1.0, label='')
elif sStack:
ax.stackplot(data.index, data.values.T,
colors=colors, alpha=0.6, labels=labels)
[AddLegend(c, t) for c, t in zip(colors, labels)]
if useTrend:
sf = pd.DataFrame(index=tf.index)
sf['sum']=0
for p in tf:
sf['sum'] += tf[p]
ax.plot(sf['sum'].index, sf['sum'].values.T,
color = cmap[p], label='')
elif sLine:
for p in plotOrd:
AddLegend(cmap[p], tmap[p])
if not useTrend:
if trendonly:
ax.plot(data.index, data[p].values, '-',
color=cmap[p], alpha=st.ta, lw=st.tlw,
label='')
else:
ax.plot(data.index, data[p].values, '-',
color=cmap[p], alpha=st.ta, lw=st.dlw,
label='')
else:
ax.plot(data.index, data[p].values, '-',
color=cmap[p], alpha=st.da+.1, lw=st.dlw,
label='')
ax.plot(tf.index, tf[p].values,
color=cmap[p], alpha=st.ta, lw = st.tlw,
label='')
else:
# do nothing by default since might not be wanted.
pass
# Annotate chart
plt.title('Precipitation (Rain, Snow) or \n'
'Dry Days (by Daily ' + ct[column] +
' Temperature Range) in '+ df.city)
ax.set_ylabel('Number of Days per Year')
ax.legend(handles=handles, loc='upper left', ncol=4, markerscale=3,
bbox_to_anchor=(0, -0.04), handlelength=0.8, fontsize='small')
at.Attribute(va='below', source=st.source)
# Add second y-axis with percentages on right
ax2, pad = at.AddYAxis(ax, percent=365)
ax2.set_ylabel('Percent of Year')
fig.show()
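# Usage sketch (illustrative only): the three plot styles accepted by
# DayCountPlot, plus a trend-only variant with a wider smoothing window.
def _example_daycount_usage():
    wf = WxDF(0)
    DayCountPlot(wf, style='fill')
    DayCountPlot(wf, style='stack')
    DayCountPlot(wf, style='line', trendonly=True, size=31)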
def DayThreshPlot(df, cols=None, thresh=0.0, above=True, **kwargs):
    """Count the days above or below a threshold.
    Parameters
    ----------
    df : WxDF
object containing daily data for a location. Can use a
        pandas.DataFrame if df.city contains the name of the city.
cols : list of int default [df.tmx, df.tmn]
columns to plot.
thresh : float default 0.0
Threshold to test.
above : bool default True
Count days above threshold (warmer) or below.
Includes threshold value.
kwargs : **dict
keywords to pass to the smoothing function that override the default
values. See Smoothing.Smooth or the trendDefault definition for
explanation.
"""
trend = _GetTrendArgs(**kwargs)
if not cols: cols = [df.tmx, df.tmn]
if type(cols) != list:
cols = [cols]
cns = df.columns[cols]
fig = plt.figure(df.city+'_Threshold_'+str(thresh))
fig.clear()
ax = fig.add_subplot(111)
for cn in cns:
# Get the count of days above/below threshold, grouped by year
if above:
ys = df.loc[df[cn]>=thresh, cn]
else:
ys = df.loc[df[cn]<=thresh, cn]
ys = ys.groupby(ys.index.year).count()
if len(ys)<2: # don't plot if no data returned
print('No data for '+cn)
continue
ax.plot(ys, lw=st.dlw, color=st.colors[cn], label='')
tf = sm.Smooth(ys, trend)
ax.plot(tf, lw=st.tlw, color=st.colors[cn], alpha=st.ta, label=cn)
at.AddRate(tf.loc[1990:], label='{:.2} days/decade')
# Set up title from possible options
if above:
rtxt = "At or Above"
else:
rtxt = "At or Below"
if cols[0] in df.temps:
ttxt = 'Temperature'
utxt = '°C'
elif cols[0]==df.sn:
ttxt = 'Snow'
utxt = ' cm/day'
else:
ttxt = 'Rain'
utxt = ' mm/day'
title = " ".join([df.city, 'Days with', ttxt, rtxt, str(thresh)])+utxt
ax.set_title(title)
at.Attribute(source=st.source, ha='left')
plt.legend()
ax2, pad = at.AddYAxis(ax)
ax.set_ylabel('Days')
plt.show()
def TemperatureCountPlot(df, use = [0,1,2,3,4,5], column=None, style='fill',
trendonly=False, **kwargs):
"""Count the days in each temperature range. Plot in various formats.
Parameters
----------
df : WxDF
object containing daily data for a location. Can use a
pandas.DataFrame if df.city contains the name of the city.
use : list of int default [0,1,2,3,4,5]
Data to plot. (Frigid, Freezing, Cold, Cool, Warm, Hot)
column : int [4 | 6 | 8] default 4
Which data column to use, (high, low, mean).
style : ['fill' | 'stack' | 'line'] default 'fill'
Style of plot to make. 'fill' fills the line to the baseline. 'stack'
makes a stack plot where the areas add to 100%. 'line' has no fill
and just shows the data.
trendonly : boolean default False
True if only the trend line is needed. The style keyword determines
how it will look.
kwargs : **dict
keywords to pass to the smoothing function that override the default
values. Supply 'trend=False' if no trend line is desired.
See Smoothing.Smooth or the trendDefault definition for explanation.
"""
trend = _GetTrendArgs(**kwargs)
useTrend = bool(trend['trend'])
if not column: column = df.tmx # set default
tfilt = Texture()
ct = {df.tmx: 'High',
df.tmn: 'Low',
df.tav: 'Mean'} # text used in title
city = df.city
fig = plt.figure(df.city+'_TemperatureCount')
fig.clear()
ax = fig.add_subplot(111)
sFill = sLine = sStack = False
if style == 'line': sLine = True
elif style == 'stack': sStack = True
else: sFill = True
# Name, Lower Limit, Upper Limit, Column, Color
props = [['Frigid', '(< -15°C)', -100, -15, 'C1'],
['Freezing', '(-15–0)', -15, 0, 'C5'],
['Cold', '(0–15)', 0, 15, 'C4'],
['Cool', '(15–22)', 15, 22, 'C3'],
['Warm', '(22–30)', 22, 30, 'C0'],
['Hot', '(≥30)', 30, 100, 'C6']]
props = [props[i] for i in use] # only include provided columns
cmap = {} # colour map
tmap = {} # text values (label and range)
[cmap.update({p[0]:p[4]}) for p in props]
[tmap.update({p[0]:' '.join([p[0], p[1]])}) for p in props]
cn = df.columns[column] # column name
df = df[cn]
# Create empty series for when all df data is outside a test case
dfmx = df.max()
dfmn = df.min()
dfz = pd.Series(index=np.arange(df.index[0].year,
df.index[-1].year + 1))
x = list(range(df.index[0].year, df.index[-1].year+1))
data = pd.DataFrame(index=x, dtype=int)
colors = []
labels = []
for name, r, ll, ul, c in props:
if (ll > dfmx) or (ul < dfmn): # handle case where no data results
gr = dfz
gr[:] = 0
else:
sf = df[(df>=ll) & (df<ul)]
gr = sf.groupby(sf.index.year).count()
data[name] = gr
data.loc[np.isnan(data[name]), name] = 0 # set nan to 0
colors.append(c)
labels.append(' '.join([name,r]))
if useTrend or trendonly:
# set default trend type if not given
if not useTrend: trend['trend']=trendDefault['trend']
tf = pd.DataFrame(index=data.index)
for t in data:
tf[t] = sm.Smooth(data[t], trend)
if trendonly:
# Use the trend data instead of actual data
useTrend = False
data = tf
# Create legend entries manually
handles = []
def AddLegend(c, t):
# add a legend entry for this color
line = mpatches.Patch(color=c, label=t)
handles.append(line)
sums = data.sum() # sort by total area
sums.sort_values(inplace=True, ascending=False)
plotOrd = list(sums.index)
if sFill:
# Get plot order
fa = 0.75
if useTrend:
fa = 0.15
for p in plotOrd:
ax.fill_between(data.index, data[p].values,
color=cmap[p], alpha=fa, label='',
agg_filter=tfilt)
AddLegend(cmap[p], tmap[p])
if useTrend:
for p in plotOrd:
ax.plot(tf.index, tf[p].values, lw=3.0,
color=cmap[p], alpha=1.0, label='')
elif sStack:
ax.stackplot(data.index, data.values.T,
colors=colors, alpha=0.6, labels=labels)
[AddLegend(c, t) for c, t in zip(colors, labels)]
if useTrend:
sf = pd.DataFrame(index=tf.index)
sf['sum']=0
for p in tf:
sf['sum'] += tf[p]
ax.plot(sf['sum'].index, sf['sum'].values.T,
color = cmap[p], label='')
elif sLine:
for p in plotOrd:
AddLegend(cmap[p], tmap[p])
if not useTrend:
if trendonly:
ax.plot(data.index, data[p].values, '-',
color=cmap[p], alpha=st.ta, lw=st.tlw,
label='')
else:
ax.plot(data.index, data[p].values, '-',
color=cmap[p], alpha=st.ta, lw=st.dlw,
label='')
else:
ax.plot(data.index, data[p].values, '-',
color=cmap[p], alpha=st.da+.1, lw=st.dlw,
label='')
ax.plot(tf.index, tf[p].values,
color=cmap[p], alpha=st.ta, lw = st.tlw,
label='')
else:
# Do nothing by default since other plot styles might not be wanted.
pass
# Annotate chart
plt.title('Count of Days by\n'
'Daily ' + ct[column] + ' Temperature Range in '+ city)
ax.set_ylabel('Number of Days per Year')
ax.legend(handles=handles, loc='upper left', ncol=3, markerscale=3,
bbox_to_anchor=(0, -0.04), handlelength=0.8, fontsize='small')
at.Attribute(va='below', source=st.source)
# Add second y-axis with percentages on right
ax2, pad = at.AddYAxis(ax, percent=365)
ax2.set_ylabel('Percent of Year')
fig.show()
def WarmPlot(df, high=0, low=0, **kwargs):
"""
Plot the spring and fall dates when smoothed daily temperatures cross the
given thresholds.
Parameters
----------
df : WxDF
object containing daily data for a location. Can use a
pandas.DataFrame if df.city contains the name of the city.
high : float default 0
Crossover temperature for the daily high. Set to None to remove it.
low : float default 0
Crossover temperature for the daily low. Set to None to remove it.
kwargs : **dict
keywords to pass to the smoothing function that override the default
values. See Smoothing.Smooth or the trendDefault definition for
explanation.
"""
trend = _GetTrendArgs(**kwargs)
longTrend = _GetTrendArgs(trend, size=61)
cols = [df.tmx, df.tmn]
af = df.iloc[:,cols].copy()
maxc, minc = df.columns[cols]
dy = ' Day'
tr = ' Trend'
ny = pd.Timestamp(year=2016, month=1, day=1)
for c in [maxc, minc]:
af[c] = sm.Smooth(df[c], longTrend)
by = af.loc[af.index.dayofyear < 182] # beginning of year
ey = af.loc[af.index.dayofyear > 182] # end of year
wby = by.groupby(by.index.year).mean() # just getting the proper index
wey = wby.copy()
for f, h in zip([wby, wey], [by, ey]):
for c, lim in zip([maxc, minc], [high, low]):
# get date for each year where it is just above freezing
if lim is None: continue
# get dates above the limit
temp = h.loc[h[c]>lim, [c]]
# get the remaining date with lowest temp for each year
f[c+dy] = temp.groupby(temp.index.year).idxmin()
# change year to 2016 for plotting
f[c+dy] = f[c+dy].apply(lambda x: x.replace(year=2016))
a = f[c+dy].apply(lambda x: x.dayofyear)
a = sm.Smooth(a, trend)
# convert dayofyear to dates
f[c+tr] = a.apply(lambda x: ny + pd.to_timedelta(x-1, unit='d'))
# Set up plot
majorFmt = mdates.DateFormatter('%b %d')
minorFmt = mdates.DateFormatter('')
majorFmtRt = mdates.DateFormatter('%d %b')
xlim = (wby.index.min()-5, wby.index.max()+5)
xticks = np.arange((xlim[0]//10*10), ((xlim[1]//10+1)*10), 10)
fig = plt.figure(df.city+'_Warm')
fig.clear()
title = "Average Daily Temperatures Crossing Threshold for " + df.city
fig.suptitle(title)
fig.subplots_adjust(hspace=0.01, wspace=0.1,
left=0.08, right=0.92,
bottom=0.05, top=0.95)
ax0 = fig.add_subplot(2, 1, 2) # bottom chart
ax1 = fig.add_subplot(2, 1, 1)
ax1.spines['bottom'].set_visible(False)
ax0.spines['top'].set_visible(False)
ax1.xaxis.tick_top()
ax1.tick_params(labeltop='off') # don't put tick labels at the top
ax0.xaxis.tick_bottom()
ax1.tick_params(axis='x', which='major', color=(0,0,0,0))
# ax0 is on the bottom and should hold the beginning year results (wby)
# Set Y Label positions and formats
for ax, f, mxc, mnc in zip([ax0, ax1], [wby, wey],
[maxc, minc], [minc, maxc]):
# find max and min value for each axes
mx = pd.Timestamp('2016-01-01')
mn = pd.Timestamp('2016-12-31')
cols = f.columns
for c in [mxc+dy, mnc+dy]:
if c not in cols: continue
mx = max(mx, f[c].max())
mn = min(mn, f[c].min())
ax.set_ylim(mn, mx)
# locators must be declared separately for each axes but
# the format can be reused.
ax.yaxis.set_major_locator(mdates.DayLocator(range(5,32,5)))
ax.yaxis.set_minor_locator(mdates.DayLocator())
ax.yaxis.set_major_formatter(majorFmt)
ax.yaxis.set_minor_formatter(minorFmt)
ax.set_xticks(xticks)
ax.set_xlim(xlim[0], xlim[1])
for ax, f in zip([ax0, ax1], [wby, wey]):
for c, co, lim in zip([maxc, minc], ['C0', 'C1'], [high, low]):
if lim is None: continue
ax.plot(f[c+tr], co+'-', lw=st.tlw, alpha=st.ta)
ax.plot(f[c+dy], co+'o-', lw=st.dlw, alpha=st.da)
# Create legend entries manually
handles = []
for c, t, lim in zip(['C0', 'C1'],
['Daily High', 'Daily Low'],
[high, low]):
if lim is None: continue
line = mlines.Line2D([], [], color=c,
alpha=1.0, lw=st.tlw,
label=(t+' Crossing {:.1f} °C').format(lim))
handles.append(line)
plt.legend(handles=handles, loc=2)
at.Attribute(ax=ax0, source=st.source, ha='left')
# Add labels on right
ax2 = ax0.twinx()
ax3 = ax1.twinx()
for ax, axo in zip([ax2, ax3], [ax0, ax1]):
ax.grid(False) # is sitting on top of lines
ax.set_yticks(axo.get_yticks())
ax.set_ylim(axo.get_ylim())
ax.yaxis.set_major_locator(axo.yaxis.get_major_locator())
ax.yaxis.set_minor_locator(axo.yaxis.get_minor_locator())
ax.yaxis.set_major_formatter(majorFmtRt)
ax.yaxis.set_minor_formatter(minorFmt)
ax.spines['right'].set_alpha(0)
plt.show()
def WarmDaysPlot(df, **kwargs):
"""
Plot the length of the warm season over time.
Parameters
----------
df : WxDF
object containing daily data for a location. Can use a
pandas.DataFrame if df.city contains the name of the city.
kwargs : **dict
keywords to pass to the smoothing function that override the default
values. See Smoothing.Smooth or the trendDefault definition for
explanation.
"""
trend = _GetTrendArgs(**kwargs)
longTrend = _GetTrendArgs(trend, size=61) # use for longer trends
cols = [df.tmx, df.tmn]
af = df.iloc[:,cols].copy()
maxc, minc = df.columns[cols]
dy = ' Day'
tr = ' Trend'
for c in [maxc, minc]:
af[c] = sm.Smooth(df[c], longTrend)
by = af.loc[af.index.dayofyear < 182] # beginning of year
ey = af.loc[af.index.dayofyear > 182] # end of year
wby = by.groupby(by.index.year).mean() # just getting the proper index
wey = wby.copy()
diff = wby.copy()
cy=wby.copy() # count of all days above freezing
xlim = (wby.index.min()-5, wby.index.max()+5)
xticks = np.arange((xlim[0]//10*10), ((xlim[1]//10+1)*10), 10)
for c in [maxc, minc]:
for f, h in zip([wby, wey], [by, ey]):
# get date for each year where it is just above freezing
temp = h.loc[h[c]>0, [c]]
f[c+dy] = temp.groupby(temp.index.year).idxmin()
f[c+dy] = f[c+dy].apply(lambda x: x.dayofyear)
diff[c+dy] = wey[c+dy] - wby[c+dy]
diff[c+tr] = sm.Smooth(diff[c+dy], trend)
# Collect data on total days, which might be useful later
temp = df.loc[df[c]>0, [c]]
cy[c+dy] = temp.groupby(temp.index.year).count()
cy[c+tr] = sm.Smooth(cy[c+dy], trend)
fig = plt.figure(df.city+'_WarmCount')
fig.clear()
ax = fig.add_subplot(111)
ax.set_xticks(xticks)
ax.set_xlim(xlim[0], xlim[1])
for c, co, l in zip([maxc, minc], ['C0', 'C1'],
['Daily High above 0°C', 'Daily Low above 0°C']):
ax.plot(diff[c+dy], 'o-', color=co, lw=st.dlw, alpha=st.da, label='')
ax.plot(diff[c+tr], '-', color=co, lw=st.tlw, alpha=st.ta,
label=l)
#ax.plot(cy[c+dy], '.', color=co, lw=st.dlw, alpha=st.da, label='')
#ax.plot(cy[c+tr], '-', color=co, lw=st.dlw, alpha=st.da, label='')
at.AddRate(diff[c+tr].loc[1970:], label='{:.2} days/decade')
plt.title('Length of Period With Average Temperature\n'
'above Freezing for ' + df.city)
plt.ylabel('Days')
plt.legend()
at.Attribute(source=st.source)
at.AddYAxis(ax)
plt.show()
def SnowPlot(df, **kwargs):
"""
Go through all data and plot first and last day of snow for the year.
Parameters
----------
df : WxDF
object containing daily data for a location. Can use a
pandas.DataFrame if df.city contains the name of the city.
"""
trend = _GetTrendArgs(**kwargs)
shortTrend = _GetTrendArgs(trend, size=15)
# set up data for each set of records:
# [Name, df column, mark color and format, zorder]
# Create list of daily records. Use 2016 as reference year (leap year)
col = df.sn
cn = df.columns[col]
df = df.loc[df[cn]>0] # get days with snow
# put day of year with snow
af = pd.Series(data=df.index.dayofyear, index=df.index)
dy = ' Day'
tr = ' Trend'
ny = pd.Timestamp('2016-01-01') # start of year
fig = plt.figure(df.city+'_Snow')
fig.clear()
ax = fig.add_subplot(111)
# af, by, ey are series, not dataframes
by = af[af < 182] # beginning of year
ey = af[af > 182] # end of year
# create a dataframe to put series into
wby = pd.DataFrame(index=list(range(df.index[0].year,
df.index[-1].year+1)))
wey = wby.copy()
for f, h in zip([wby, wey], [by, ey]):
# get latest or earliest date for each year where it snowed
gr = h.groupby(h.index.year)
if h is by:
f[cn+dy] = gr.idxmax()
else:
f[cn+dy] = gr.idxmin()
f[cn+dy] = f[cn+dy].apply(lambda x: x.replace(year=2016))
a = f[cn+dy].apply(lambda x: x.dayofyear)
a = sm.Smooth(a, shortTrend)
f[cn+tr] = a.apply(lambda x: ny + pd.to_timedelta(x-1, unit='d'))
ax.plot(f[cn+dy], 'o-', color='C5',
linewidth=st.dlw,
alpha=st.ta)
plt.plot(f[cn+tr], '-', linewidth=st.tlw, color='C1')
plt.title('First and Last Snowfall for ' + df.city)
plt.axis([1885, 2020, '20161231', '20160101'])
at.Attribute(ax, ha='left', va='top', source=st.source)
at.MonthFmt(ax)
at.AddYAxis(ax, month=True)
plt.show()
return
def TopPrecipPlot(df, cols=None, lim = 10, **kwargs):
"""Plot average precipitation for top days of each year
Parameters
----------
df : pandas.DataFrame
daily data contained in a pandas DataFrame
cols : list of int [df.rn | df.sn | df.pr] default [df.rn, df.sn]
precipitation column to use (rain, snow, all)
lim : float default 10
percentage of annual values to use for calculation
size : int default 21
Size of the moving average window. Larger values give smoother
results.
kwargs : **dict
keywords to pass to the smoothing function that override the default
values. See Smoothing.Smooth or the trendDefault definition for
explanation.
"""
trend = _GetTrendArgs(**kwargs)
if not cols: cols = [df.rn, df.sn]
frac = lim/100
tmap = dict(zip(df.precips, ['Rain', 'Snow', 'Precipitation']))
tunit = dict(zip(df.precips, ['mm', 'cm', 'mm']))
fig = plt.figure(df.city+'_TopPrecip')
fig.clear()
ax = fig.add_subplot(111)
for col in cols:
cn = df.columns[col]
ps = pd.Series(index=list(range(df.index[0].year,
df.index[-1].year+1)))
rs = df.loc[df[cn]>0, cn].copy()
gs = rs.groupby(rs.index.year)
for yr, ys in gs:
num = int(len(ys) * frac + 0.5)
ps[yr] = ys.nlargest(num).mean()
ts = sm.Smooth(ps, trend)
ax.plot(ps, lw=st.dlw, color=st.colors[col])
ax.plot(ts, lw=st.tlw, color=st.colors[col],
label=tmap[col]+' ('+tunit[col]+')')
at.AddRate(ts.loc[1970:], ax=ax,
label='{:.2} '+tunit[col]+'/decade')
plt.legend()
ax.set_title('Average Precipitation of Top {0}% '
'Days Per Year in {1}'.format(lim, df.city))
ax.set_ylabel('Precipitation (mm/cm per day)')
at.Attribute(ax, ha='left', va='bottom', source=st.source)
at.AddYAxis(ax)
fig.show()
def StormPlot(df, cols=None, lim = 10, **kwargs):
"""Plot average total precipitation for top storms (consecutive days of
precipitation) of each year.
Parameters
----------
df : pandas.DataFrame
daily data contained in a pandas DataFrame
cols : list of int [df.rn | df.sn | df.pr] default [df.rn, df.sn]
precipitation column to use (rain, snow, all)
lim : float default 10
percentage of annual values to use for calculation
kwargs : **dict
keywords to pass to the smoothing function that override the default
values. See Smoothing.Smooth or the trendDefault definition for
explanation.
"""
trend = _GetTrendArgs(**kwargs)
if not cols: cols = [df.rn, df.sn] # default values
fig = plt.figure(df.city+'_Storms')
fig.clear()
ax = fig.add_subplot(111)
frac = lim/100
tmap = dict(zip(df.precips, ['Rain', 'Snow', 'Precipitation']))
tunit = dict(zip(df.precips, ['mm', 'cm', 'mm']))
ps = pd.Series(index=list(range(df.index[0].year,
df.index[-1].year+1)))
for col in cols:
# Extract the required column. Use [col] instead of just col to force
# a dataframe instead of a series.
tf = df.iloc[:,[col]].copy()
cn = df.columns[col] # column name
# Create a new column that has True where there was no precipiation
# and False when there was. These will be treated as 1/0 later.
tf['dry'] = tf[cn].apply(lambda x: x==0)
# Create a new column that has the cumulative sum of the 'dry' column.
# What happens is that this value increases when it was dry, but
# remains constant when it wasn't (True=1). This allows grouping
# by consecutive precipitation days, since they will have the same
# 'c' value.
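# Worked example of the cumulative-sum trick (hypothetical values):
#   precip: [0, 2, 3, 0, 0, 5] -> 'dry': [1, 0, 0, 1, 1, 0]
#   'c' (cumsum of 'dry'): [1, 1, 1, 2, 3, 3]
# The 2 and 3 mm days share c=1 and sum to a single 5 mm storm, while
# the final 5 mm day forms its own storm at c=3.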
tf['c'] = tf['dry'].cumsum()
# Create a new frame grouped by the year and the 'c' value, with
# days with same 'c' value (consecutive) summed together.
storms = tf.groupby([tf.index.year, 'c']).sum()
# Remove the days that didn't have any precipitation
storms = storms[storms[cn]>0]
# Now just group by year (the first level index)
gr = storms.groupby(level=0)
# Go through each year, find the desired fraction of values,
# then take the mean of the top number of values. Insert this result
# into the precipitation series by year.
for yr, yf in gr:
num = int(yf[cn].count() * frac + 0.5)
ps[yr] = yf[cn].nlargest(num).mean()
# Get the smoothed data, and plot the results.
ts = sm.Smooth(ps, trend)
ax.plot(ps, lw=st.dlw, color=st.colors[col])
ax.plot(ts, lw=st.tlw, color=st.colors[col],
label=tmap[col]+' ('+tunit[col]+')')
at.AddRate(ts.loc[1970:], ax=ax,
label='{:.2} '+tunit[col]+'/decade')
plt.legend()
ax.set_title('Average Total Precipitation of Top {0}% '
'of Storms Per Year in {1}'.format(lim, df.city))
ax.set_ylabel('Precipitation (mm/cm per storm)')
at.Attribute(ax, ha='left', va='bottom', source=st.source)
at.AddYAxis(ax)
fig.show()
def MonthRangePlot(df, month=None, combine=True, **kwargs):
"""Get expected high and low temperature ranges for the supplied month.
Parameters
----------
df : pandas.DataFrame
daily data contained in a pandas DataFrame
month : int default None
Desired month (1-12). The default gives current month.
combine : boolean default True
Combine the maximum and minimum temperatures onto one plot. Otherwise
use two separate plots (which is easier to read).
kwargs : **dict
keywords to pass to the smoothing function that override the default
values. See Smoothing.Smooth or the trendDefault definition for
explanation.
Note
----
Uses moving average to calculate the mean temperatures, and the standard
deviation from this.
"""
# Approach:
# Get monthly means of mean, high, and low temperatures
# Calculate the standard deviation of temperatures
# Calculate the means ± standard deviations
# Get smoothed means and plot those
# Get min and max values of high and low temps and plot those.
trend = _GetTrendArgs(**kwargs)
if month is None or month==0 or month>12:
month = dt.date.today().month
maxc = df.columns[df.tmx] # max:0/4, min:1/6, avg:2/8
minc = df.columns[df.tmn]
avgc = df.columns[df.tav]
umaxc = 'umaxc'
lmaxc = 'lmaxc'
uminc = 'uminc'
lminc = 'lminc'
uavgc = 'uavgc'
lavgc = 'lavgc'
# just use year, max, min, avg temps for desired month
df = df.loc[df.index.month==month, [maxc, minc, avgc]]
# Get rid of rows that have 'nan' values
df.dropna(inplace=True)
gb = df.groupby(df.index.year)
sf = gb.std()
af = gb.mean() # mean
mx = gb.max() # max, highest value above mean
mn = gb.min() # min, lowest value below mean
# calculate temperature ranges
for cr, c in zip([umaxc, uminc, uavgc], [maxc, minc, avgc]):
af[cr] = af[c] + sf[c]
for cr, c in zip([lmaxc, lminc, lavgc], [maxc, minc, avgc]):
af[cr] = af[c] - sf[c]
afs = af.copy() # smoothed version of temps and ranges
# Get the daily average max, min, avg temps.
for c in af.columns:
afs[c] = sm.Smooth(af[c], trend)
# PLOTTING
title = 'Temperature Range in '+df.city+' for '+ st.monthL[month]
fig = plt.figure(df.city+'_'+st.monthS[month]+'_Range')
fig.clear()
if not combine: # create two separate plots
plt.subplots_adjust(hspace=0.001, wspace=0.1,
left=0.08, right=0.92,
bottom=0.05, top=0.95)
ax0 = plt.subplot(2, 1, 1)
ax1 = plt.subplot(2, 1, 2, sharex=ax0)
xt = ax0.get_xticklabels()
plt.setp(xt, visible=False)
fig.suptitle(title)
else: # just put everything on one plot
ax0 = plt.subplot(1, 1, 1)
ax1 = ax0
plt.title(title)
ax0.fill_between(mx.index, mx[maxc], mn[maxc],
color='C0', alpha=st.ma, label='Upper/Lower Highs')
ax1.fill_between(mx.index, mx[minc], mn[minc],
color='C1', alpha=st.ma, label='Upper/Lower Lows')
ax0.fill_between(afs.index, afs[umaxc], afs[lmaxc],
color='C0', alpha=st.sa, label='68% Range Highs')
ax1.fill_between(afs.index, afs[uminc], afs[lminc],
color='C1', alpha=st.sa, label='68% Range Lows')
if not combine:
at.AddRate(afs[maxc].loc[1970:], ax=ax0)
at.AddRate(afs[minc].loc[1970:], ax=ax1)
ax0.plot(afs[maxc], 'C0-', lw=2, alpha=st.ta, label='Average Highs')
ax1.plot(afs[minc], 'C1-', lw=2, alpha=st.ta, label='Average Lows')
if combine:
ax0.plot(afs[avgc], 'C2-', lw=st.tlw, alpha=st.ta, label='Average Daily')
# Add current available month as distinct points
ly = af.index[-1]
marks = ['^', 'o', 'v']
maxvals = [mx.iloc[-1,0], af.iloc[-1,0], mn.iloc[-1,0]]
minvals = [mx.iloc[-1,1], af.iloc[-1,1], mn.iloc[-1,1]]
maxt = [' Max\n Day', ' Avg\n Day', ' Min\n Day']
mint = [' Max\n Ngt', ' Avg\n Ngt', ' Min\n Ngt']
maxt[0] = str(ly) + '\n' + maxt[0]
for mk, mxv, mnv, mxt, mnt in zip(marks, maxvals, minvals, maxt, mint):
ax0.plot(ly, mxv, color='C0', marker=mk)
ax1.plot(ly, mnv, color='C1', marker=mk)
ax0.text(ly, mxv, mxt, ha='left', va='center', size='small')
ax1.text(ly, mnv, mnt, ha='left', va='center', size='small')
# Annotate
ax0.set_ylabel('Temperature °C')
if not combine:
ax1.set_ylabel('Temperature °C')
def mid(x, f=.5):
"""Get the midpoint between min and max, or a fraction if supplied
"""
mn = min(x)
mx = max(x)
return (mx-mn)*f + mn
def best(s, i, comp):
"""Get the best point (highest or lowest) near the supplied point.
s: series
i: index location
comp: 'low' | 'high'
"""
lim = 3
t = s.iloc[i-lim: i+lim]
t.sort_values(inplace=True, ascending=(comp=='low'))
return s.index.get_loc(t.index[0]), t.iloc[0]
txt0 = ['Hottest Day', 'Coldest Day',
'Average High', '68% Day Spread']
txt1 = ['Hottest Night', 'Coldest Night',
'Average Low', '68% Night Spread']
va0 = ['top', 'bottom', 'bottom', 'top']
va1 = ['top', 'bottom', 'top', 'bottom']
yrs = len(afs.index)
xt0 = [yrs*.05, yrs*.05, yrs*.4, yrs*.25]
xt1 = [yrs*.05, yrs*.05, yrs*.4, yrs*.25]
xt0 = [int(x) for x in xt0] # text x locations
xt1 = [int(x) for x in xt1]
xp0 = [x+13 for x in xt0] # arrow x locations
xp1 = [x+13 for x in xt1]
yt0 = [mid(mx[maxc],.8), mid(mn[maxc],.2),
afs[maxc].iloc[xt0[2]]+2, afs[umaxc].iloc[xt0[3]]-1]
yt1 = [mid(mx[minc]), mid(mn[minc],.2),
afs[minc].iloc[xt1[2]-2], afs[lminc].iloc[xt1[3]]+1]
yp0 = [mx[maxc].iloc[xp0[0]], mn[maxc].iloc[xp0[1]],
afs[maxc].iloc[xp0[2]], afs[umaxc].iloc[xp0[3]]]
yp1 = [mx[minc].iloc[xp1[0]], mn[minc].iloc[xp1[1]],
afs[minc].iloc[xp1[2]], afs[lminc].iloc[xp1[3]]]
# get closest peaks for annotation
xp0[0], yp0[0] = best(mx[maxc], xp0[0], 'high')
xp1[0], yp1[0] = best(mx[minc], xp1[0], 'high')
xp0[1], yp0[1] = best(mn[maxc], xp0[1], 'low')
xp1[1], yp1[1] = best(mn[minc], xp1[1], 'low')
props = {'arrowstyle': '->',
'edgecolor': 'k'}
for t, v, i, y, ip, yp in zip(txt0, va0, xt0, yt0, xp0, yp0):
x = afs.index[i]
xp = afs.index[ip]
ax0.annotate(t, (xp,yp), (x,y), va=v, ha='center',
color='darkred', size='smaller',
arrowprops=props)
for t, v, i, y, ip, yp in zip(txt1, va1, xt1, yt1, xp1, yp1):
x = afs.index[i]
xp = afs.index[ip]
ax1.annotate(t, (xp,yp), (x,y), va=v, ha='center',
color='darkblue', size='smaller',
arrowprops=props)
if combine:
x = afs.index[xt0[2]]
y = afs[avgc].iloc[xt0[2]]
ax0.text(x, y, 'Month Average',
ha='center', va='bottom', size='smaller')
at.Attribute(source=st.source)
at.AddYAxis(ax0)
if not combine: at.AddYAxis(ax1)
plt.show()
def Histograms(df, col=WxDF.tmx, months=None,
llim=None, ulim=None, showMedian=True, **kwargs):
"""Plot histograms of values for groups of years
Parameters
----------
df : pandas.DataFrame
daily data contained in a pandas DataFrame
col : int default WxDF.tmx
Which data column to use. Defaults to max temperature.
months : list default None
List of which months to use. Passing None will cause the entire
year to be used.
llim : float default None
Lower limit of data, showing only values at or above this.
ulim : float default None
Upper limit of data, showing only values at or below this.
showMedian : bool default True
If True, shows a line demarking both median and mean value.
showRaw : bool default False
If True, shows the actual histogram bars as well as the smoothed
line. Useful for identifying data irregularities.
fuzz : bool default True
If True, checks if measured units are bigger than recorded units,
eg measured in Fahrenheit, but recorded in .1°C. If so, will
spread the measured value over any bins it could possibly overlap.
kwargs : **dict
keywords to pass to the smoothing function that override the default
values. See Smoothing.Smooth or the trendDefault definition for
explanation.
Note
----
Use the 'size' keyword to adjust how the smoothed line looks.
"""
trend = _GetTrendArgs(size=51, trend='ssa', pad='mirror')
trend = _GetTrendArgs(trend, **kwargs)
bw = kwargs.pop('bw', 1.0) # bin width, with override
scale = kwargs.pop('scale', 5) # amount of overlap on plots
cn = df.columns[col]
kind = {df.tmx: ['Daily High Temperature', '°C'],
df.tmn: ['Daily Low Temperature', '°C'],
df.tav: ['Daily Average Temperature', '°C'],
df.rn: ['Daily Rain', 'mm/day'],
df.sn: ['Daily Snow','cm/day'],
df.pr: ['Daily Precipitation', 'mm/day (equivalent)']}
# set up the year ranges to do histogram over
cyear = df.index.year.max()+1 # last year in data
ranges = []
if df.baseline[1] < 1970: # add period up to 1970, if applicable
ranges.append(tuple(df.baseline)) # start with baseline period
ranges.append((df.baseline[1], 1970))
for r in range(1970, df.index[-1].year, 10):
ranges.append((r, min(cyear, r+10)))
pos = list(range(len(ranges))) # position on the axes
pos = pos[::-1] # reverse order for plotting
tfilt = Texture('noise', block=2, light=True) # texture filter
tshadow = Texture('shadow', pad=.2, cut_figure=False, alpha=.7)
# Set up the figure
fig = plt.figure(df.city+'_Histogram '+df.columns[col])
fig.clear()
ax = fig.add_subplot(111)
title = f"{df.city} {kind[col][0]} Distribution"
# add limits
if llim is not None and ulim is not None:
title += f"\nBetween {llim} and {ulim} {kind[col][1]}"
elif llim is not None:
title += f"\nAbove {llim} {kind[col][1]}"
elif ulim is not None:
title += f"\nBelow {ulim} {kind[col][1]}"
# add months
if months is not None:
tlist = []
if llim is None and ulim is None: title += '\n'
for m in months:
tlist.append([', ', st.monthL[m]])
tlist[0][0] = ' For '
if len(months) > 1: tlist[-1][0] = ', and '
for t in tlist: title += t[0] + t[1]
ax.set_title(title)
ax.set_xlabel(kind[col][1])
# get just the desired months
mf = df[cn].dropna()
if months:
mf = mf[mf.index.month.isin(months)]
if col in df.precips:
mf = mf[mf > 0]
# get just the desired value ranges (for looking at edges)
if llim is not None:
mf = mf[mf >= llim]
if ulim is not None:
mf = mf[mf <= ulim]
# Bins are centred on bin width multiples, ie centred on 0
minb = np.round(mf.min()/bw)*bw - bw * .5
maxb = np.round((mf.max())/bw)*bw + bw
bins = np.arange(minb, maxb, bw, dtype=float)
def GetDiff(s):
"""Return the most common jump between values. Used to determine if
measured units differ from recorded units.
"""
vc = s.value_counts() # get list of values by count
keys = sorted(vc.index)
diffs = np.array(keys[1:]) - np.array(keys[:-1])
# there'll be numeric errors, so round before counting again
ds = pd.Series(diffs.round(2)).value_counts()
d0 = ds.index[0]
if d0 == 0.6: diff = 5/9 # Fahrenheit
elif d0 == 0.5: diff = 0.5
elif d0 == 0.1: diff = 0.1
elif len(ds.index) == 1:
diff = d0
else:
d1 = ds.index[1]
diff = (ds[d0] * d0 + ds[d1] * d1) / (ds[d0] + ds[d1])
return diff
def PlaceData(cts, col, s, check):
"""Creates counts of each recorded value. Also checks if measured
amounts larger than recorded resolution, which would result in
gaps in the bins. To avoid this, the values are spread out over
the width of the measurement error, and allocating the
measurement to multiple bins proportionately to the overlap.
"""
vc = s.value_counts() # get list of values by count
keys = sorted(vc.index)
if check: # check if distance between values large
diff = GetDiff(s)
if diff == 0.1: check = False
if check:
# allocate a portion of a value count to the bins the
# value might overlap due to measurement error not aligning
# with the bins.
for k in keys:
val = vc[k]
kmin = k - diff * 0.5
kmax = k + diff * 0.5
bl = int((kmin - minb)//bw) # get lowest bin
bh = int((kmax - minb)//bw) # get highest bin
# get amount of overlap on bin
rl = (1 - ((kmin - minb)%bw)) * bw/diff
rh = ((kmax - minb)%bw) * bw/diff
cts[col].iloc[bl] += val * rl
cts[col].iloc[bh] += val * rh
# now add any bins inbetween
if bh-bl > 1:
ratio = bw/diff # if ratio > 1, nothing will happen
for b in range(bl+1, bh):
cts[col].iloc[b] += val * ratio
else:
for k in keys:
b = int((k - minb)//bw) # which bin to use
cts[col].iloc[b] += vc[k] # add counts to bin
return check
# Now break up each range
counts = pd.DataFrame(index=bins)
medians = {} # dictionary of medians for each range
means = {}
doSpreading = kwargs.pop('fuzz', True)
for r in ranges:
# get the data for each range of years
c = f"{r[0]}—{r[1]-1}" # column/range name
rs = mf[mf.index.year.isin(range(r[0], r[1]))] # range series
medians[c] = rs.median()
means[c] = rs.mean()
counts[c] = pd.Series(index=bins, data=np.zeros(len(bins)))
if doSpreading:
diffS = GetDiff(rs[rs.index.year==r[0]])
diffE = GetDiff(rs[rs.index.year==(r[1]-1)])
if diffS==0 or diffE==0: print(r)
if diffS != diffE:
for year in range(r[0], r[1]):
ys = rs[rs.index.year==year]
doSpreading = PlaceData(counts, c, ys, check=True)
else:
if len(rs)==0:
print(f'Missing data: {r[0]}-{r[1]}')
doSpreading = PlaceData(counts, c, rs, check=True)
else:
PlaceData(counts, c, rs, check=False)
for c in counts: counts[c] /= counts[c].sum()
# counts now contains a normalized histogram in each column
# put in 10 points per bin
x = np.arange(minb, maxb, bw/10, dtype=float)
hf = pd.DataFrame(index=x, dtype=float) # holds histograms
sf = hf.copy() # holds smoothed data
hpk = spk = 0 # for scaling plots relative to peak of all plots
for c in counts:
y = np.zeros(len(x))
# add 10 points per bin
for i in range(len(y)):
b = int((x[i] - minb)//bw)
y[i] = counts[c].iloc[b]
hf[c] = y
sf[c] = sm.Smooth(hf[c], trend)
hpk = max(hpk, hf[c].max())
spk = max(spk, sf[c].max())
showRaw = kwargs.pop('showRaw', False)
if showRaw: ratio = scale/hpk
else: ratio = scale/spk
bg = ax.get_facecolor() # use background color for contrast
bgcolor = (bg[0], bg[1], bg[2], 0.8) # add lower alpha
for c, p in zip(hf, pos):
hf[c] = hf[c] * ratio + p
sf[c] = sf[c] * ratio + p
if showRaw:
ax.fill_between(x, hf[c].values, p,
color=st.colors[col], zorder=20-p)
ax.plot(hf[c], lw=2, color='grey', zorder=20-p)
else:
ax.fill_between(x, sf[c].values, p, color=st.colors[col],
zorder=20-p, agg_filter=tfilt)
ax.plot(sf[c], lw=2, color='k', zorder=20-p)
ax.plot(sf[c], lw=2, color='k', zorder=20-p-.1,
agg_filter=tshadow)
if showMedian:
# ``md`` is the median's index in x, rounded to the nearest 10th of a degree
md = int(np.round((medians[c]-minb)*10))
ax.vlines(x[md], p, sf[c].iloc[md], linestyle=':',
color=bgcolor, alpha=.5, lw=2, zorder=20-p)
mn = int(np.round((means[c]-minb)*10))
ax.vlines(x[mn], p, sf[c].iloc[mn], linestyle='--',
color=bgcolor, alpha=.5, lw=2, zorder=20-p)
if showMedian: # label last histogram
title = ['Mean', 'Median']
xdel = [-1, 1]
xpos = [x[mn], x[md]]
ypos = .2
align = ['right', 'left']
left = 0
right = 1
if xpos[0] > xpos[1]:
left, right = right, left
for i, p in zip(range(2), [left, right]):
ax.text(xpos[i]+xdel[p], ypos, title[i], color=bgcolor,
zorder=30, alpha=.75, size='medium',
ha = align[p], va='bottom')
# Alternate approach using existing functions, but lacks options
# parts = ax.violinplot(data, pos, points=100, vert=False, widths=6.0,
# showmeans=False, showextrema=False,
# showmedians=False)
# for body in parts['bodies']:
# paths = body.get_paths()[0]
# mean = np.mean(paths.vertices[:, 1])
# paths.vertices[:, 1][paths.vertices[:, 1] <= mean] = mean
# body.set_edgecolor('black')
# body.set_facecolor(st.colors[col])
# body.set_lw(2)
# body.set_alpha(1)
ax.set_ylim(bottom=-1)
ax.set_yticks(pos)
tx = maxb
if llim is None:
tx = minb
for r, p in zip(hf.columns, pos):
t = plt.text(tx, p+.2, r, size='medium', zorder=30, va='bottom')
t.set_path_effects([path_effects.Stroke(linewidth=3,
foreground=bgcolor), path_effects.Normal()])
ax.tick_params(axis='y', labelcolor=(0,0,0,0))
ax.tick_params(axis='y', color=(0,0,0,0))
at.Attribute(ha='left', source=st.source)
plt.show()
# ======================
# Miscellaneous Routines
# ======================
def GridPlot(df, cols=2, title='', fignum=20):
"""Create a series of plots above each other, sharing x-axis labels.
Parameters
----------
df : pandas.DataFrame
DataFrame containing data to be plotted in separate columns. Column
names will be used for labels.
cols : int (opt) default 2
Number of columns per figure to use.
title : String (opt) default blank
Name to use on each figure. Page numbers are added.
fignum : int (opt) default 20
Figure to start at. Useful if you want to keep older plots available.
Notes
-----
Useful for plotting the SSA reconstructed principles to see which might be
important.
"""
rows = 4
n = len(df.columns)
if n <= rows: cols=1
plots = rows * cols
ax = list(range(plots))
pages = int(np.ceil(n / plots))
title = title + ' Page {}'
for page in range(pages):
fig = plt.figure(fignum+page, figsize=(14,10))
fig.clear()
fig.suptitle(title.format(page+1), fontsize=16, y=0.98)
plt.subplots_adjust(hspace=0.001, wspace=0.1,
left=0.05, right=0.95,
bottom=0.05, top=0.95)
end = min(plots, n - page * plots)
for i in range(end):
loc = page * plots + i
r = int(i/plots)
if r==0:
ax[i] = plt.subplot(rows, cols, i+1)
else:
ax[i] = plt.subplot(rows, cols, i+1, sharex=ax[i%cols])
if i < end-cols: # hide ticks on all but last cols plots
xt = ax[i].get_xticklabels()
plt.setp(xt, visible=False)
col = df.columns[loc]
ax[i].plot(df[col], label=col)
plt.legend(loc='upper left')
plt.show()
def CompareWeighting(df, cols=[8], size=31, fignum=20):
"""Compare various weighting windows on real data.
"""
yf = df.GetYears(cols)
yf = yf - yf.iloc[:30].mean()
col = yf.columns[0]
y = yf[col]
fig = plt.figure(fignum)
fig.clear()
plt.plot(y, 'ko-', lw=1, alpha=0.15,
label=(df.city+' '+col))
ma = sm.WeightedMovingAverage(y, size, winType=sm.Triangle)
plt.plot(ma, '-', alpha=0.8, lw=1, label='Triangle')
w = sm.Triangle(size, clip=0.8)
ma = sm.WeightedMovingAverage(y, size, wts=w)
plt.plot(ma, '-', alpha=0.8, lw=1, label='Clipped Triangle (0.8)')
ma = sm.WeightedMovingAverage(y, size, winType=np.hamming)
plt.plot(ma, '-', alpha=0.8, lw=1, label='Hamming')
ma = sm.WeightedMovingAverage(y, size)
plt.plot(ma, '-', alpha=0.8, lw=1, label='Hanning')
ma = sm.WeightedMovingAverage(y, size, winType=np.blackman)
plt.plot(ma, '-', alpha=0.8, lw=1, label='Blackman')
plt.title('Comparison of Window Types for Moving Average')
plt.legend(loc='upper left')
plt.ylabel('Temperature Change from Baseline (°C)')
# Annotate chart
at.Baseline(df.baseline)
plt.show()
def CompareSmoothing(df, cols=[8],
size=31,
frac=2./3., pts=31, itn=3, order=2,
lags=31,
fignum=21, city=0):
"""Comparison between moving weighted average and lowess smoothing.
df: daily records for a city
cols: list of columns to use. Currently only uses first column supplied.
size: size of moving average window
frac: fraction of data to use for lowess window
itn: number of iterations to use for lowess
order: order of the lowess polynomial
lags: number of time lags to use for SSA
"""
yf = df.GetYears(cols)
yf = yf - yf.iloc[:30].mean()
col = yf.columns[0]
y = yf[col]
fig = plt.figure(fignum)
fig.clear()
plt.plot(y, 'ko-', lw=1, alpha=0.15,
label=(df.city+' '+col))
if pts is None:
p = np.ceil(frac * len(y))
else:
p = pts
ma = sm.WeightedMovingAverage(y, size)
plt.plot(ma, 'b-', alpha=0.5, lw=2, label='Weighted Moving Average')
#mc = WeightedMovingAverage(y, size, const=True)
#plt.plot(mc, 'r-', alpha=0.5, lw=2, label='WMA Constant Window')
lo = sm.Lowess(y, f=frac, pts=pts, itn=itn)
plt.plot(lo, 'g-', alpha=0.5, lw=2, label='Lowess (linear)')
lp = sm.Lowess(y, f=frac, pts=pts, itn=itn, order=order)
plt.plot(lp, 'g.', alpha=0.5, lw=2, label='Lowess (polynomial)')
ss = sm.SSA(y, lags, rtnRC=2)
plt.plot(ss.iloc[:,0], 'r-', alpha=0.5, lw=2, label='SSA')
ss2 = ss.sum(axis=1)
plt.plot(ss2, 'r.', alpha=0.5, lw=2, label='SSA, 2 components')
#so = SMLowess(y, f=frac, pts=pts, iter=itn)
#plt.plot(so, 'c-', alpha=0.5, lw=2, label='SM Lowess')
plt.title('Comparison between Weighted Moving Average, Lowess, and SSA'
' - Padded')
plt.legend(loc='upper left')
plt.ylabel('Temperature Change from Baseline (°C)')
# Annotate chart
at.Baseline(df.baseline)
boxt = ("Moving Average:\n"
" Weights: Cosine (Hanning)\n"
" Size: {0}\n"
"Lowess:\n"
" Size: {1}\n"
" Iterations: {2}\n"
" Polynomial Order: {3}\n"
"Singular Spectrum Analysis:\n"
" Lags: {4}\n"
"Chart: @Dan613")
box = boxt.format(size, p, itn, order, lags)
plt.text(1987, np.floor(y.min())+.05, box)
plt.show()
return
if __name__=='__main__':
df = WxDF()
print(df)
| bsd-3-clause |
nburn42/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined_test.py | 30 | 70017 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNLinearCombinedEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
from tensorflow.python.training import ftrl
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util
def _assert_metrics_in_range(keys, metrics):
epsilon = 0.00001 # Added for floating point edge cases.
for key in keys:
estimator_test_utils.assert_in_range(0.0 - epsilon, 1.0 + epsilon, key,
metrics)
class _CheckCallsHead(head_lib.Head):
"""Head that checks whether head_ops is called."""
def __init__(self):
self._head_ops_called_times = 0
@property
def logits_dimension(self):
return 1
def create_model_fn_ops(
self, mode, features, labels=None, train_op_fn=None, logits=None,
logits_input=None, scope=None):
"""See `_Head`."""
self._head_ops_called_times += 1
loss = losses.mean_squared_error(labels, logits)
return model_fn.ModelFnOps(
mode,
predictions={'loss': loss},
loss=loss,
train_op=train_op_fn(loss),
eval_metric_ops={'loss': loss})
@property
def head_ops_called_times(self):
return self._head_ops_called_times
class _StepCounterHook(session_run_hook.SessionRunHook):
"""Counts the number of training steps."""
def __init__(self):
self._steps = 0
def after_run(self, run_context, run_values):
del run_context, run_values
self._steps += 1
@property
def steps(self):
return self._steps
class EmbeddingMultiplierTest(test.TestCase):
"""dnn_model_fn tests."""
def testRaisesNonEmbeddingColumn(self):
one_hot_language = feature_column.one_hot_column(
feature_column.sparse_column_with_hash_bucket('language', 10))
params = {
'dnn_feature_columns': [one_hot_language],
'head': head_lib.multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
one_hot_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
with self.assertRaisesRegexp(ValueError,
'can only be defined for embedding columns'):
dnn_linear_combined._dnn_linear_combined_model_fn(features, labels,
model_fn.ModeKeys.TRAIN,
params)
def testMultipliesGradient(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
embedding_wire = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('wire', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'dnn_feature_columns': [embedding_language, embedding_wire],
'head': head_lib.multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep language embeddings constant, whereas wire
# embeddings will be trained.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
with ops.Graph().as_default():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
training_util.create_global_step()
model_ops = dnn_linear_combined._dnn_linear_combined_model_fn(
features, labels, model_fn.ModeKeys.TRAIN, params)
with monitored_session.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
language_initial_value = sess.run(language_var)
for _ in range(2):
_, language_value = sess.run([model_ops.train_op, language_var])
self.assertAllClose(language_value, language_initial_value)
# We could also test that wire_value changed, but that test would be flaky.
class DNNLinearCombinedEstimatorTest(test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedEstimator)
def testNoFeatureColumns(self):
with self.assertRaisesRegexp(
ValueError,
'Either linear_feature_columns or dnn_feature_columns must be defined'):
dnn_linear_combined.DNNLinearCombinedEstimator(
head=_CheckCallsHead(),
linear_feature_columns=None,
dnn_feature_columns=None,
dnn_hidden_units=[3, 3])
def testCheckCallsHead(self):
"""Tests binary classification using matrix data as input."""
head = _CheckCallsHead()
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column('feature', dimension=4)]
bucketized_feature = [feature_column.bucketized_column(
cont_features[0], test_data.get_quantile_based_buckets(iris.data, 10))]
estimator = dnn_linear_combined.DNNLinearCombinedEstimator(
head,
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
estimator.fit(input_fn=test_data.iris_input_multiclass_fn, steps=10)
self.assertEqual(1, head.head_ops_called_times)
estimator.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=10)
self.assertEqual(2, head.head_ops_called_times)
estimator.predict(input_fn=test_data.iris_input_multiclass_fn)
self.assertEqual(3, head.head_ops_called_times)
class DNNLinearCombinedClassifierTest(test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedClassifier)
def testExperimentIntegration(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
exp = experiment.Experiment(
estimator=dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testNoFeatureColumns(self):
with self.assertRaisesRegexp(
ValueError,
'Either linear_feature_columns or dnn_feature_columns must be defined'):
dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=None,
dnn_feature_columns=None,
dnn_hidden_units=[3, 3])
def testNoDnnHiddenUnits(self):
def _input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.real_valued_column('age')
with self.assertRaisesRegexp(
ValueError,
'dnn_hidden_units must be defined when dnn_feature_columns is '
'specified'):
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[age, language])
classifier.fit(input_fn=_input_fn, steps=2)
def testSyncReplicasOptimizerUnsupported(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
sync_optimizer = sync_replicas_optimizer.SyncReplicasOptimizer(
opt=adagrad.AdagradOptimizer(learning_rate=0.1),
replicas_to_aggregate=1,
total_num_replicas=1)
sync_hook = sync_optimizer.make_session_run_hook(is_chief=True)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=sync_optimizer)
with self.assertRaisesRegexp(
ValueError,
'SyncReplicasOptimizer is not supported in DNNLinearCombined model'):
classifier.fit(
input_fn=test_data.iris_input_multiclass_fn, steps=100,
monitors=[sync_hook])
def testEmbeddingMultiplier(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[embedding_language],
dnn_hidden_units=[3, 3],
embedding_lr_multipliers={embedding_language: 0.8})
self.assertEqual({
embedding_language: 0.8
}, classifier.params['embedding_lr_multipliers'])
def testInputPartitionSize(self):
def _input_fn_float_label(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(language_column, dimension=1),
]
# Set num_ps_replica to be 10 and the min slice size to be extremely small,
# so as to ensure that there'll be 10 partitions produced.
config = run_config.RunConfig(tf_random_seed=1)
config._num_ps_replicas = 10
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=2,
dnn_feature_columns=feature_columns,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad',
config=config,
input_layer_min_slice_size=1)
# Ensure the param is passed in.
self.assertTrue(callable(classifier.params['input_layer_partitioner']))
# Ensure the partition count is 10.
classifier.fit(input_fn=_input_fn_float_label, steps=50)
partition_count = 0
for name in classifier.get_variable_names():
if 'language_embedding' in name and 'Adagrad' in name:
partition_count += 1
self.assertEqual(10, partition_count)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_feature = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testLogisticRegression_TensorData(self):
"""Tests binary classification using Tensor data as input."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
features = {}
for i in range(4):
# The following shows how to provide the Tensor data for
# RealValuedColumns.
features.update({
str(i):
array_ops.reshape(
constant_op.constant(
iris.data[:, i], dtype=dtypes.float32), [-1, 1])
})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [60, 0]],
dense_shape=[len(iris.target), 2])
labels = array_ops.reshape(
constant_op.constant(
iris.target, dtype=dtypes.int32), [-1, 1])
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column(str(i)) for i in range(4)
]
linear_features = [
feature_column.bucketized_column(cont_features[i],
test_data.get_quantile_based_buckets(
iris.data[:, i], 10))
for i in range(4)
]
linear_features.append(
feature_column.sparse_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testEstimatorWithCoreFeatureColumns(self):
"""Tests binary classification using Tensor data as input."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
features = {}
for i in range(4):
# The following shows how to provide the Tensor data for
# RealValuedColumns.
features.update({
str(i):
array_ops.reshape(
constant_op.constant(iris.data[:, i], dtype=dtypes.float32),
[-1, 1])
})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [60, 0]],
dense_shape=[len(iris.target), 2])
labels = array_ops.reshape(
constant_op.constant(iris.target, dtype=dtypes.int32), [-1, 1])
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [fc_core.numeric_column(str(i)) for i in range(4)]
linear_features = [
fc_core.bucketized_column(
cont_features[i],
sorted(set(test_data.get_quantile_based_buckets(
iris.data[:, i], 10)))) for i in range(4)
]
linear_features.append(
fc_core.categorical_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
]
embedding_features = [
feature_column.embedding_column(
sparse_features[0], dimension=1)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=sparse_features,
dnn_feature_columns=embedding_features,
dnn_hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testMultiClass(self):
"""Tests multi-class classification using matrix data as input.
Please see testLogisticRegression_TensorData() for how to use Tensor
data as input instead.
"""
iris = base.load_iris()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=bucketized_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=[language_column],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
label_keys=label_keys)
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
_assert_metrics_in_range(('accuracy',), scores)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
labels = constant_op.constant([[1], [0], [0], [0]])
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=2,
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Cross entropy = -0.25*log(0.25)-0.75*log(0.75) = 0.562
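    # With constant features the classifier converges to the base rate
    # p = 0.25, so the per-example log loss is
    # 0.25 * (-ln 0.25) + 0.75 * (-ln 0.75) = 0.347 + 0.216 ~= 0.562.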
self.assertAlmostEqual(0.562, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
weight_column_name='w',
n_classes=2,
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted cross entropy = (-7*log(0.25)-3*log(0.75))/10 = 1.06
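    # Training is effectively unweighted (all train weights are 1), so the
    # prediction stays near p = 0.25; the eval weights the positive row by 7:
    # (7 * 1.386 + 3 * 0.288) / 10 ~= 1.06.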
self.assertAlmostEqual(1.06, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
      # First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x).
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByObject(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=adagrad.AdagradOptimizer(learning_rate=0.1))
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByString(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer='Ftrl',
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad')
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByFunction(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
def _optimizer_exp_decay():
global_step = training_util.get_global_step()
learning_rate = learning_rate_decay.exponential_decay(
learning_rate=0.1,
global_step=global_step,
decay_steps=100,
decay_rate=0.001)
return adagrad.AdagradOptimizer(learning_rate=learning_rate)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=_optimizer_exp_decay,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=_optimizer_exp_decay)
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testPredict(self):
"""Tests weight column in evaluation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32)}
return features, labels
def _input_fn_predict():
y = input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32), num_epochs=1)
features = {'x': y}
return features
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=100)
probs = list(classifier.predict_proba(input_fn=_input_fn_predict))
self.assertAllClose([[0.75, 0.25]] * 4, probs, 0.05)
classes = list(classifier.predict_classes(input_fn=_input_fn_predict))
self.assertListEqual([0] * 4, classes)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
      # For binary classification, the 2nd column of "predictions" holds the
      # probability of the positive class.
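      # The strided_slice below is equivalent to predictions[:, 1:2]: with
      # end_mask=1 the end bound of dimension 0 is ignored, so every row is
      # kept and only the positive-class probability column is selected.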
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc})
# Test the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
metric_ops.streaming_accuracy
})
# Test the case where the prediction_key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testVariableQuery(self):
"""Tests get_variable_names and get_variable_value."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=500)
var_names = classifier.get_variable_names()
self.assertGreater(len(var_names), 3)
for name in var_names:
classifier.get_variable_value(name)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[
feature_column.real_valued_column('age'),
language,
],
dnn_feature_columns=[
feature_column.embedding_column(
language, dimension=1),
],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = input_fn()
features[input_feature_key] = array_ops.placeholder(dtypes.string)
return features, targets
classifier.export(
export_dir,
serving_input_fn,
input_feature_key,
use_deprecated_input_fn=False)
def testCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=True)
classifier.fit(input_fn=_input_fn_train, steps=1000)
self.assertIn('binary_logistic_head/centered_bias_weight',
classifier.get_variable_names())
# logodds(0.75) = 1.09861228867
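    # Three of the four labels are positive, so the centered bias should
    # converge to log(0.75 / 0.25) = log(3) ~= 1.0986.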
self.assertAlmostEqual(
1.0986,
float(classifier.get_variable_value(
'binary_logistic_head/centered_bias_weight')[0]),
places=2)
def testDisableCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=False)
classifier.fit(input_fn=_input_fn_train, steps=500)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
def testGlobalStepLinearOnly(self):
"""Tests global step update for linear-only model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testGlobalStepDNNOnly(self):
"""Tests global step update for dnn-only model."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testGlobalStepDNNLinearCombinedBug(self):
"""Tests global step update for dnn-linear combined model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language],
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3],
fix_global_step_increment_bug=False)
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
global_step = classifier.get_variable_value('global_step')
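    # Without the fix, the linear and the DNN train ops each increment the
    # global step, so global_step advances by roughly 2 per session run and
    # the 100 requested steps finish after about 50 actual iterations.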
if global_step == 100:
# Expected is 100, but because of the global step increment bug, is 50.
# Occasionally, step increments one more time due to a race condition,
# reaching 51 steps.
self.assertIn(step_counter.steps, [50, 51])
else:
# Occasionally, training stops when global_step == 102, due to a race
# condition. In addition, occasionally step increments one more time due
# to a race condition reaching 52 steps.
self.assertIn(step_counter.steps, [51, 52])
def testGlobalStepDNNLinearCombinedBugFixed(self):
"""Tests global step update for dnn-linear combined model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language],
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3],
fix_global_step_increment_bug=True)
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testLinearOnly(self):
"""Tests that linear-only instantiation works."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.real_valued_column('age')
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
variable_names = classifier.get_variable_names()
self.assertNotIn('dnn/logits/biases', variable_names)
self.assertNotIn('dnn/logits/weights', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/age/weight', variable_names)
self.assertIn('linear/language/weights', variable_names)
self.assertEquals(
1, len(classifier.get_variable_value('linear/age/weight')))
self.assertEquals(
100, len(classifier.get_variable_value('linear/language/weights')))
def testLinearOnlyOneFeature(self):
"""Tests that linear-only instantiation works for one feature only."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 99)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
variable_names = classifier.get_variable_names()
self.assertNotIn('dnn/logits/biases', variable_names)
self.assertNotIn('dnn/logits/weights', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/language/weights', variable_names)
self.assertEquals(
1, len(classifier.get_variable_value('linear/bias_weight')))
self.assertEquals(
99, len(classifier.get_variable_value('linear/language/weights')))
def testDNNOnly(self):
"""Tests that DNN-only instantiation works."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=1000)
classifier.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=100)
variable_names = classifier.get_variable_names()
self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
self.assertIn('dnn/logits/weights', variable_names)
self.assertIn('dnn/logits/biases', variable_names)
self.assertNotIn('linear/bias_weight', variable_names)
self.assertNotIn('linear/feature_BUCKETIZED/weight', variable_names)
def testDNNWeightsBiasesNames(self):
"""Tests the names of DNN weights and biases in the checkpoints."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=5)
variable_names = classifier.get_variable_names()
self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
self.assertIn('dnn/logits/weights', variable_names)
self.assertIn('dnn/logits/biases', variable_names)
class DNNLinearCombinedRegressorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
exp = experiment.Experiment(
estimator=dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=test_data.iris_input_logistic_fn, steps=10)
scores = regressor.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=1)
self.assertIn('loss', scores.keys())
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn():
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=10)
classifier.evaluate(input_fn=_input_fn, steps=1)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
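    # The regressor converges to the mean label 0.25, giving squared errors of
    # 0.5625 once and 0.0625 three times: (0.5625 + 3 * 0.0625) / 4 = 0.1875.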
self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
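    # Training weights are all 1, so the prediction stays near 0.25; the eval
    # weights give (7 * 0.5625 + 3 * 0.0625) / 10 = 0.4125.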
self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
      # First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.2)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
regressor.predict_scores(input_fn=_input_fn, as_iterable=False)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor.predict_scores(input_fn=predict_input_fn, as_iterable=True)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': metric_ops.streaming_mean_squared_error,
('my_metric', 'scores'): _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
    # Tests the case where the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testCustomMetricsWithMetricSpec(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testExport(self):
"""Tests export model for servo."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = _input_fn()
features[input_feature_key] = array_ops.placeholder(dtypes.string)
return features, targets
regressor.export(
export_dir,
serving_input_fn,
input_feature_key,
use_deprecated_input_fn=False)
def testTrainSaveLoad(self):
"""Tests regression with restarting training / evaluate."""
def _input_fn(num_epochs=None):
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {
'x':
input_lib.limit_epochs(
constant_op.constant([[100.], [3.], [2.], [2.]]),
num_epochs=num_epochs)
}
return features, labels
model_dir = tempfile.mkdtemp()
# pylint: disable=g-long-lambda
new_regressor = lambda: dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
model_dir=model_dir,
config=run_config.RunConfig(tf_random_seed=1))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor = new_regressor()
regressor.fit(input_fn=_input_fn, steps=10)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
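    # A fresh estimator pointed at the same model_dir restores the trained
    # weights from the checkpoint, so its predictions should be identical.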
regressor = new_regressor()
predictions2 = list(regressor.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=config)
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testLinearOnly(self):
"""Tests linear-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDNNOnly(self):
"""Tests DNN-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
class FeatureEngineeringFunctionTest(test.TestCase):
"""Tests feature_engineering_fn."""
def testNoneFeatureEngineeringFn(self):
def input_fn():
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
return features, labels
def feature_engineering_fn(features, labels):
_, _ = features, labels
labels = constant_op.constant([[1000.], [30.], [20.], [20.]])
features = {'x': constant_op.constant([[1000.], [30.], [20.], [20.]])}
return features, labels
estimator_with_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1),
feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=110)
estimator_without_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
estimator_without_fe_fn.fit(input_fn=input_fn, steps=110)
    # Both estimators learn y = x on the data they see; the feature-engineering
    # fn rescales features and labels by 10, so its prediction is ~1000 while
    # the plain estimator predicts ~100.
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict_scores(
input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(1000., prediction_with_fe_fn, delta=10.0)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict_scores(
input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(100., prediction_without_fe_fn, delta=1.0)
if __name__ == '__main__':
test.main()
| apache-2.0 |
anurag313/scikit-learn | sklearn/decomposition/__init__.py | 147 | 1421 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
from .online_lda import LatentDirichletAllocation
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD',
'LatentDirichletAllocation']
| bsd-3-clause |
olologin/scikit-learn | sklearn/tests/test_cross_validation.py | 24 | 47465 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
            raise ValueError('X cannot be more than 2d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
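# A minimal sketch of how the mock above is exercised further down: extra fit
# arguments are forwarded through cross_val_score's fit_params, e.g.
#   cval.cross_val_score(MockClassifier(), X, y,
#                        fit_params={'sample_weight': np.ones(10)})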
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
# avoid StratifiedKFold's Warning about least populated class in y
y = np.arange(10) % 3
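# y is [0, 1, 2, 0, 1, 2, ...]: every class has at least three samples, which
# is enough for 3-fold stratification without triggering the warning.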
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
        # Check that the union of the train and test splits covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    # Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 3]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
    # Check that despite the warning the folds are still computed, even though
    # not all classes are necessarily represented on each side of every split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
    # Check that an error is raised if any individual class has fewer
    # members than n_folds.
y = [3, 3, -1, -1, 2]
assert_raises(ValueError, cval.StratifiedKFold, y, 3)
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
error_string = ("k-fold cross validation requires at least one"
" train / test split")
assert_raise_message(ValueError, error_string,
cval.StratifiedKFold, y, 0)
assert_raise_message(ValueError, error_string,
cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
    # Manually check that StratifiedKFold preserves the data ordering as much
    # as possible on toy datasets in order to avoid hiding sample dependencies
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
    # Check that StratifiedKFold returns folds with balanced sizes (only when
    # stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
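    # With shuffling enabled, no test fold should coincide exactly with one of
    # the contiguous thirds [0, 100), [100, 200) or [200, 300) of the indices.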
for train, test in kf:
assert_true(np.any(np.arange(100) != ind[test]))
assert_true(np.any(np.arange(100, 200) != ind[test]))
assert_true(np.any(np.arange(200, 300) != ind[test]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
    # for this data. We can highlight this fact by computing k-fold cross-
    # validation with and without shuffling: we observe that the shuffling case
    # wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.96) than the non-shuffling
    # variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_label_kfold():
rng = np.random.RandomState(0)
# Parameters of the test
n_labels = 15
n_samples = 1000
n_folds = 5
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
labels = rng.randint(0, n_labels, n_samples)
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
labels = np.asarray(labels, dtype=object)
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Construct the test data
labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis',
'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia']
labels = np.asarray(labels, dtype=object)
n_labels = len(np.unique(labels))
n_samples = len(labels)
n_folds = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Should fail if there are more folds than labels
labels = np.array([1, 1, 1, 2, 2])
assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    # Test that the StratifiedShuffleSplit draws indices with
    # equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_stratified_shuffle_split_overlap_train_test_bug():
# See https://github.com/scikit-learn/scikit-learn/issues/6121 for
# the original bug report
labels = [0, 1, 2, 3] * 3 + [4, 5] * 5
splits = cval.StratifiedShuffleSplit(labels, n_iter=1,
test_size=0.5, random_state=0)
train, test = next(iter(splits))
assert_array_equal(np.intersect1d(train, test), [])
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by Kfold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_label_shuffle_split():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
]
for y in ys:
n_iter = 6
test_size = 1. / 3
slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(len(slo), n_iter)
y_unique = np.unique(y)
for train, test in slo:
# First test: no train label is in the test set and vice versa
y_train_unique = np.unique(y[train])
y_test_unique = np.unique(y[test])
assert_false(np.any(np.in1d(y[train], y_test_unique)))
assert_false(np.any(np.in1d(y[test], y_train_unique)))
# Second test: train and test add up to all the data
assert_equal(y[train].size + y[test].size, y.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test: # unique train and test labels are correct,
# +- 1 for rounding error
assert_true(abs(len(y_test_unique) -
round(test_size * len(y_unique))) <= 1)
assert_true(abs(len(y_train_unique) -
round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
    # test with 3d X and estimators that do and do not allow nd input
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
        mask_train = np.zeros(len(y), dtype=bool)
        mask_test = np.zeros(len(y), dtype=bool)
        mask_train[train] = 1
        mask_test[test] = 1
        cv_masks.append((mask_train, mask_test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def test_train_test_split_pandas():
    # check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def test_train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (classes are balanced so f1_score should be equal to the
    # zero/one score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] // 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] // 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] // 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
    assert_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
    # test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cval.cross_val_predict(classif, X, y, cv=10)
preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
| bsd-3-clause |
aflaxman/scikit-learn | sklearn/manifold/tests/test_isomap.py | 121 | 4301 | from itertools import product
import numpy as np
from numpy.testing import (assert_almost_equal, assert_array_almost_equal,
assert_equal)
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
    n_neighbors = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
    iso = manifold.Isomap(n_neighbors, n_components=2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
def test_isomap_clone_bug():
# regression test for bug reported in #6062
model = manifold.Isomap()
for n_neighbors in [10, 15, 20]:
model.set_params(n_neighbors=n_neighbors)
model.fit(np.random.rand(50, 2))
assert_equal(model.nbrs_.n_neighbors,
n_neighbors)
| bsd-3-clause |
ajylee/gpaw-rtxs | gpaw/test/big/bader_water/bader_plot.py | 2 | 2572 | #*******************PYLAB STUFF************************************************
from matplotlib import rc # This must be before "import pylab"
rc('font', **{'family':'serif','sans-serif':['cm'],'serif':['cm']})
rc('text', usetex=True)
import pylab as pl
width_pt = 360.0 # Get this from LaTeX using \showthe\columnwidth
inch = 1.0 / 72.27 # Convert pt to inch
golden_mean = (pl.sqrt(5)-1.0) / 2.0 # Aesthetic ratio
width_inch = width_pt * inch # width in inches
height_inch = width_inch * golden_mean # height in inches
figsize = [width_inch, height_inch]
params = {'lines.linewidth': 1.2,
'font.size': 10,
'figure.figsize': figsize,
'figure.dpi': 200,
'savefig.dpi': 200,
'figure.subplot.right': 1.0,
'figure.subplot.top': 0.88,
'figure.subplot.left': 0.0,
'figure.subplot.bottom': 0.0,
'figure.subplot.wspace': 0.0,
'figure.subplot.hspace': 0.0,
'font.size': 10.0,
'legend.fontsize': 'medium',
'legend.loc': 'upper right',
}
pl.rcParams.update(params)
#*******************PYLAB STUFF************************************************
from ase.io.cube import read_cube
import numpy as np
# Contour values and colors
vals = np.linspace(0.1, 1.8, 16)**2
vals = list(vals) + [99,]
colors = pl.cm.Reds(np.linspace(0.15, 1.0, len(vals)))
#Pseudo density
nt, atoms = read_cube('water_pseudo_density.cube', read_data=True)
x = len(nt) // 2
nt = nt[x]
# All electron density and bader volumes
n, atoms = read_cube('water_density.cube', read_data=True)
#bader, atoms2 = read_cube('AtIndex.cube', read_data=True)
x = len(n) // 2
n = n[x]
#bader = bader[x]
# plot
fig = pl.figure(figsize=(6.2, 3))
pl.subplot(121)
pl.contourf(nt.T, vals, origin='lower', extend='neither', colors=colors)
pl.axis('equal')
pl.axis([52-25, 52+25, 52-25, 52+25])
pl.axis('off')
pl.text(52.5, 55, '$7.07e$', size=20, ha='center', va='center')
pl.title('Pseudo density', size=20)
pl.subplot(122)
pl.contourf(n.T, vals, colors=colors,
origin='lower', extend='neither')
#pl.contour(bader.T, [1.5], origin='lower', extend='neither', colors='k')
pl.axis('equal')
pl.axis([104-50, 104+50, 104-50, 104+50])
pl.axis('off')
pl.text(104.0, 112.0, '$9.12e$', size=20, ha='center', va='center')
pl.text( 86.5, 97.5, '$0.44e$', size=20, ha='right', va='center')
pl.text(122.0, 97.5, '$0.44e$', size=20, ha='left', va='center')
pl.title('All-electron density', size=20)
pl.savefig('water_divide_surf.eps')
pl.show()
| gpl-3.0 |
gfyoung/pandas | pandas/tests/indexes/timedeltas/test_indexing.py | 2 | 10043 | from datetime import datetime, timedelta
import re
import numpy as np
import pytest
import pandas as pd
from pandas import Index, Timedelta, TimedeltaIndex, notna, timedelta_range
import pandas._testing as tm
class TestGetItem:
def test_ellipsis(self):
# GH#21282
idx = timedelta_range("1 day", "31 day", freq="D", name="idx")
result = idx[...]
assert result.equals(idx)
assert result is not idx
def test_getitem_slice_keeps_name(self):
# GH#4226
tdi = timedelta_range("1d", "5d", freq="H", name="timebucket")
assert tdi[1:].name == tdi.name
def test_getitem(self):
idx1 = timedelta_range("1 day", "31 day", freq="D", name="idx")
for idx in [idx1]:
result = idx[0]
assert result == Timedelta("1 day")
result = idx[0:5]
expected = timedelta_range("1 day", "5 day", freq="D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[0:10:2]
expected = timedelta_range("1 day", "9 day", freq="2D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[-20:-5:3]
expected = timedelta_range("12 day", "24 day", freq="3D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[4::-1]
expected = TimedeltaIndex(
["5 day", "4 day", "3 day", "2 day", "1 day"], freq="-1D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
@pytest.mark.parametrize(
"key",
[
pd.Timestamp("1970-01-01"),
pd.Timestamp("1970-01-02"),
datetime(1970, 1, 1),
pd.Timestamp("1970-01-03").to_datetime64(),
# non-matching NA values
np.datetime64("NaT"),
],
)
def test_timestamp_invalid_key(self, key):
# GH#20464
tdi = timedelta_range(0, periods=10)
with pytest.raises(KeyError, match=re.escape(repr(key))):
tdi.get_loc(key)
class TestGetLoc:
def test_get_loc(self):
idx = pd.to_timedelta(["0 days", "1 days", "2 days"])
for method in [None, "pad", "backfill", "nearest"]:
assert idx.get_loc(idx[1], method) == 1
assert idx.get_loc(idx[1].to_pytimedelta(), method) == 1
assert idx.get_loc(str(idx[1]), method) == 1
assert idx.get_loc(idx[1], "pad", tolerance=Timedelta(0)) == 1
assert idx.get_loc(idx[1], "pad", tolerance=np.timedelta64(0, "s")) == 1
assert idx.get_loc(idx[1], "pad", tolerance=timedelta(0)) == 1
with pytest.raises(ValueError, match="unit abbreviation w/o a number"):
idx.get_loc(idx[1], method="nearest", tolerance="foo")
with pytest.raises(ValueError, match="tolerance size must match"):
idx.get_loc(
idx[1],
method="nearest",
tolerance=[
Timedelta(0).to_timedelta64(),
Timedelta(0).to_timedelta64(),
],
)
for method, loc in [("pad", 1), ("backfill", 2), ("nearest", 1)]:
assert idx.get_loc("1 day 1 hour", method) == loc
# GH 16909
assert idx.get_loc(idx[1].to_timedelta64()) == 1
# GH 16896
assert idx.get_loc("0 days") == 0
def test_get_loc_nat(self):
tidx = TimedeltaIndex(["1 days 01:00:00", "NaT", "2 days 01:00:00"])
assert tidx.get_loc(pd.NaT) == 1
assert tidx.get_loc(None) == 1
assert tidx.get_loc(float("nan")) == 1
assert tidx.get_loc(np.nan) == 1
class TestGetIndexer:
def test_get_indexer(self):
idx = pd.to_timedelta(["0 days", "1 days", "2 days"])
tm.assert_numpy_array_equal(
idx.get_indexer(idx), np.array([0, 1, 2], dtype=np.intp)
)
target = pd.to_timedelta(["-1 hour", "12 hours", "1 day 1 hour"])
tm.assert_numpy_array_equal(
idx.get_indexer(target, "pad"), np.array([-1, 0, 1], dtype=np.intp)
)
tm.assert_numpy_array_equal(
idx.get_indexer(target, "backfill"), np.array([0, 1, 2], dtype=np.intp)
)
tm.assert_numpy_array_equal(
idx.get_indexer(target, "nearest"), np.array([0, 1, 1], dtype=np.intp)
)
res = idx.get_indexer(target, "nearest", tolerance=Timedelta("1 hour"))
tm.assert_numpy_array_equal(res, np.array([0, -1, 1], dtype=np.intp))
class TestWhere:
def test_where_doesnt_retain_freq(self):
tdi = timedelta_range("1 day", periods=3, freq="D", name="idx")
cond = [True, True, False]
expected = TimedeltaIndex([tdi[0], tdi[1], tdi[0]], freq=None, name="idx")
result = tdi.where(cond, tdi[::-1])
tm.assert_index_equal(result, expected)
def test_where_invalid_dtypes(self):
tdi = timedelta_range("1 day", periods=3, freq="D", name="idx")
i2 = Index([pd.NaT, pd.NaT] + tdi[2:].tolist())
msg = "value should be a 'Timedelta', 'NaT', or array of those"
with pytest.raises(TypeError, match=msg):
tdi.where(notna(i2), i2.asi8)
with pytest.raises(TypeError, match=msg):
tdi.where(notna(i2), i2 + pd.Timestamp.now())
with pytest.raises(TypeError, match=msg):
tdi.where(notna(i2), (i2 + pd.Timestamp.now()).to_period("D"))
with pytest.raises(TypeError, match=msg):
# non-matching scalar
tdi.where(notna(i2), pd.Timestamp.now())
def test_where_mismatched_nat(self):
tdi = timedelta_range("1 day", periods=3, freq="D", name="idx")
cond = np.array([True, False, False])
msg = "value should be a 'Timedelta', 'NaT', or array of those"
with pytest.raises(TypeError, match=msg):
# wrong-dtyped NaT
tdi.where(cond, np.datetime64("NaT", "ns"))
class TestTake:
def test_take(self):
# GH 10295
idx1 = timedelta_range("1 day", "31 day", freq="D", name="idx")
for idx in [idx1]:
result = idx.take([0])
assert result == Timedelta("1 day")
result = idx.take([-1])
assert result == Timedelta("31 day")
result = idx.take([0, 1, 2])
expected = timedelta_range("1 day", "3 day", freq="D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = timedelta_range("1 day", "5 day", freq="2D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([7, 4, 1])
expected = timedelta_range("8 day", "2 day", freq="-3D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(["4 day", "3 day", "6 day"], name="idx")
tm.assert_index_equal(result, expected)
assert result.freq is None
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(["29 day", "3 day", "6 day"], name="idx")
tm.assert_index_equal(result, expected)
assert result.freq is None
def test_take_invalid_kwargs(self):
idx = timedelta_range("1 day", "31 day", freq="D", name="idx")
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
idx.take(indices, foo=2)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, out=indices)
msg = "the 'mode' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, mode="clip")
# TODO: This method came from test_timedelta; de-dup with version above
def test_take2(self):
tds = ["1day 02:00:00", "1 day 04:00:00", "1 day 10:00:00"]
idx = timedelta_range(start="1d", end="2d", freq="H", name="idx")
expected = TimedeltaIndex(tds, freq=None, name="idx")
taken1 = idx.take([2, 4, 10])
taken2 = idx[[2, 4, 10]]
for taken in [taken1, taken2]:
tm.assert_index_equal(taken, expected)
assert isinstance(taken, TimedeltaIndex)
assert taken.freq is None
assert taken.name == expected.name
def test_take_fill_value(self):
# GH 12631
idx = TimedeltaIndex(["1 days", "2 days", "3 days"], name="xxx")
result = idx.take(np.array([1, 0, -1]))
expected = TimedeltaIndex(["2 days", "1 days", "3 days"], name="xxx")
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = TimedeltaIndex(["2 days", "1 days", "NaT"], name="xxx")
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = TimedeltaIndex(["2 days", "1 days", "3 days"], name="xxx")
tm.assert_index_equal(result, expected)
msg = (
"When allow_fill=True and fill_value is not None, "
"all indices must be >= -1"
)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
msg = "index -5 is out of bounds for (axis 0 with )?size 3"
with pytest.raises(IndexError, match=msg):
idx.take(np.array([1, -5]))
| bsd-3-clause |
thorwhalen/ut | wserv/dashboard/test_analyzer.py | 1 | 2110 | __author__ = 'thor'
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from .analyzer import Analyzer
from .params import default
from ut.daf.to import to_html
form_elements = [
dict(name='your_name', type='text', display="Your Name", value='Unknown'),
dict(name='max_num', type='number', value=1),
dict(name='register', type='button', value='register inputs'),
dict(name='npts', type='number', value=30, display="num of rand pts"),
dict(name='graph_it', type='button', value="graph it")
]
class TestAnalyzer(Analyzer):
def __init__(self, input_element_collection=form_elements, analyzer_name='test_analyzer', work_folder='.'):
super(TestAnalyzer, self).__init__(form_elements=input_element_collection,
analyzer_name=analyzer_name)
self.a = dict()
self.a['image_html'] = '<img style="box-shadow: 3px 3px 5px 6px #ccc;" src={image_url}>'
self.work_folder = work_folder
def register(self, **kwargs):
self.set_inputs(**kwargs)
def graph_it(self, **kwargs):
y = np.random.rand(self.input['npts']) * self.input['max_num']
fig = plt.figure(figsize=(6, 6))
plt.plot(y, figure=fig)
image_name = "TestAnalyzer01.png"
image_path = os.path.join(self.work_folder, image_name)
if os.path.exists(image_path):
os.remove(image_path)
fig.savefig(image_path, **default['save_fig_params'])
html = self.a['image_html'].format(image_url=image_path)
d = pd.DataFrame({'input': list(self.input.keys()), 'value': list(self.input.values())})
html += "<br>\n" + to_html(d, template='box-table-c', index=False, float_format=lambda x: "{:,.0f}".format(x))
return html
# def generate_plot_of_traj_and_save_to_file(self, plot_fun=plot, plot_kwargs={}, save_fig_params={}):
# fig, ax = plot_fun(**plot_kwargs)
# save_fig_params = dict(default['save_fig_params'], **save_fig_params)
# fig.savefig(self.a.traj_image_filepath, **save_fig_params)
| mit |
ssg100/trading-with-python | lib/csvDatabase.py | 77 | 6045 | # -*- coding: utf-8 -*-
"""
intraday data handlers in csv format.
@author: jev
"""
from __future__ import division
import pandas as pd
import datetime as dt
import os
from extra import ProgressBar
dateFormat = "%Y%m%d" # date format for converting filenames to dates
dateTimeFormat = "%Y%m%d %H:%M:%S"
def fileName2date(fName):
'''convert filename to date'''
name = os.path.splitext(fName)[0]
return dt.datetime.strptime(name.split('_')[1],dateFormat).date()
def parseDateTime(dateTimeStr):
return dt.datetime.strptime(dateTimeStr,dateTimeFormat)
def loadCsv(fName):
''' load DataFrame from csv file '''
with open(fName,'r') as f:
lines = f.readlines()
dates= []
header = [h.strip() for h in lines[0].strip().split(',')[1:]]
data = [[] for i in range(len(header))]
for line in lines[1:]:
fields = line.rstrip().split(',')
dates.append(parseDateTime(fields[0]))
for i,field in enumerate(fields[1:]):
data[i].append(float(field))
return pd.DataFrame(data=dict(zip(header,data)),index=pd.Index(dates))
class HistDataCsv(object):
'''class for working with historic database in .csv format'''
def __init__(self,symbol,dbDir,autoCreateDir=False):
self.symbol = symbol
self.dbDir = os.path.normpath(os.path.join(dbDir,symbol))
if not os.path.exists(self.dbDir) and autoCreateDir:
print 'Creating data directory ', self.dbDir
os.mkdir(self.dbDir)
self.dates = []
for fName in os.listdir(self.dbDir):
self.dates.append(fileName2date(fName))
def saveData(self,date, df,lowerCaseColumns=True):
''' add data to database'''
if lowerCaseColumns: # this should provide consistency to column names. All lowercase
df.columns = [ c.lower() for c in df.columns]
s = self.symbol+'_'+date.strftime(dateFormat)+'.csv' # file name
dest = os.path.join(self.dbDir,s) # full path destination
print 'Saving data to: ', dest
df.to_csv(dest)
def loadDate(self,date):
''' load data '''
s = self.symbol+'_'+date.strftime(dateFormat)+'.csv' # file name
df = pd.DataFrame.from_csv(os.path.join(self.dbDir,s))
cols = [col.strip() for col in df.columns.tolist()]
df.columns = cols
#df = loadCsv(os.path.join(self.dbDir,s))
return df
def loadDates(self,dates):
        ''' load multiple dates, concatenating to one DataFrame '''
tmp =[]
print 'Loading multiple dates for ' , self.symbol
p = ProgressBar(len(dates))
for i,date in enumerate(dates):
tmp.append(self.loadDate(date))
p.animate(i+1)
print ''
return pd.concat(tmp)
def createOHLC(self):
''' create ohlc from intraday data'''
ohlc = pd.DataFrame(index=self.dates, columns=['open','high','low','close'])
for date in self.dates:
print 'Processing', date
try:
df = self.loadDate(date)
ohlc.set_value(date,'open',df['open'][0])
ohlc.set_value(date,'high',df['wap'].max())
ohlc.set_value(date,'low', df['wap'].min())
ohlc.set_value(date,'close',df['close'][-1])
except Exception as e:
print 'Could not convert:', e
return ohlc
def __repr__(self):
return '{symbol} dataset with {nrDates} days of data'.format(symbol=self.symbol, nrDates=len(self.dates))
class HistDatabase(object):
''' class working with multiple symbols at once '''
def __init__(self, dataDir):
# get symbols from directory names
symbols = []
for l in os.listdir(dataDir):
if os.path.isdir(os.path.join(dataDir,l)):
symbols.append(l)
#build dataset
        self.csv = {} # dict of HistDataCsv handlers
for symbol in symbols:
self.csv[symbol] = HistDataCsv(symbol,dataDir)
def loadDates(self,dates=None):
'''
get data for all symbols as wide panel
provide a dates list. If no dates list is provided, common dates are used.
'''
if dates is None: dates=self.commonDates
tmp = {}
for k,v in self.csv.iteritems():
tmp[k] = v.loadDates(dates)
return pd.WidePanel(tmp)
def toHDF(self,dataFile,dates=None):
''' write wide panel data to a hdfstore file '''
if dates is None: dates=self.commonDates
store = pd.HDFStore(dataFile)
wp = self.loadDates(dates)
store['data'] = wp
store.close()
@property
def commonDates(self):
''' return dates common for all symbols '''
t = [v.dates for v in self.csv.itervalues()] # get all dates in a list
d = list(set(t[0]).intersection(*t[1:]))
return sorted(d)
def __repr__(self):
s = '-----Hist CSV Database-----\n'
for k,v in self.csv.iteritems():
s+= (str(v)+'\n')
return s
#--------------------
if __name__=='__main__':
dbDir =os.path.normpath('D:/data/30sec')
vxx = HistDataCsv('VXX',dbDir)
spy = HistDataCsv('SPY',dbDir)
#
date = dt.date(2012,8,31)
print date
#
pair = pd.DataFrame({'SPY':spy.loadDate(date)['close'],'VXX':vxx.loadDate(date)['close']})
print pair.tail() | bsd-3-clause |
antoinecarme/pyaf | tests/neuralnet/test_ozone_rnn_only_LSTM.py | 1 | 1418 | import pandas as pd
import numpy as np
import pyaf.ForecastEngine as autof
import pyaf.Bench.TS_datasets as tsds
import logging
import logging.config
#logging.config.fileConfig('logging.conf')
logging.basicConfig(level=logging.INFO)
#get_ipython().magic('matplotlib inline')
b1 = tsds.load_ozone()
df = b1.mPastData
#df.tail(10)
#df[:-10].tail()
#df[:-10:-1]
#df.describe()
lEngine = autof.cForecastEngine()
lEngine
H = b1.mHorizon;
# lEngine.mOptions.enable_slow_mode();
lEngine.mOptions.mDebugPerformance = True;
lEngine.mOptions.mParallelMode = True;
lEngine.mOptions.set_active_autoregressions(['LSTM']);
lEngine.train(df , b1.mTimeVar , b1.mSignalVar, H);
lEngine.getModelInfo();
print(lEngine.mSignalDecomposition.mTrPerfDetails.head());
lEngine.mSignalDecomposition.mBestModel.mTimeInfo.mResolution
lEngine.standardPlots("outputs/my_rnn_ozone");
dfapp_in = df.copy();
dfapp_in.tail()
#H = 12
dfapp_out = lEngine.forecast(dfapp_in, H);
#dfapp_out.to_csv("outputs/rnn_ozone_apply_out.csv")
dfapp_out.tail(2 * H)
print("Forecast Columns " , dfapp_out.columns);
Forecast_DF = dfapp_out[[b1.mTimeVar , b1.mSignalVar, b1.mSignalVar + '_Forecast']]
print(Forecast_DF.info())
print("Forecasts\n" , Forecast_DF.tail(H));
print("\n\n<ModelInfo>")
print(lEngine.to_json());
print("</ModelInfo>\n\n")
print("\n\n<Forecast>")
print(Forecast_DF.tail(2*H).to_json(date_format='iso'))
print("</Forecast>\n\n")
| bsd-3-clause |
MaxHalford/xam | xam/preprocessing/binning/bayesian_blocks.py | 1 | 2103 | """
Bayesian blocks binning. Good for visualization.
References:
- http://adsabs.harvard.edu/abs/2012arXiv1207.5578S
- https://jakevdp.github.io/blog/2012/09/12/dynamic-programming-in-python/
"""
import numpy as np
from sklearn.utils import check_array
from .base import BaseUnsupervisedBinner
class BayesianBlocksBinner(BaseUnsupervisedBinner):
def fit(self, X, y=None, **fit_params):
# scikit-learn checks
X = check_array(X)
self.cut_points_ = [calc_bayesian_blocks(x) for x in X.T]
return self
@property
def cut_points(self):
return self.cut_points_
def calc_bayesian_blocks(x):
# Copy and sort the array
x = np.sort(x)
n = x.size
# Create length-(n + 1) array of cell edges
edges = np.concatenate([
x[:1],
0.5 * (x[1:] + x[:-1]),
x[-1:]
])
block_length = x[-1] - edges
# Arrays needed for the iteration
nn_vec = np.ones(n)
best = np.zeros(n, dtype=float)
last = np.zeros(n, dtype=int)
# Start with first data cell; add one cell at each iteration
for k in range(n):
# Compute the width and count of the final bin for all possible
# locations of the k^th changepoint
width = block_length[:k + 1] - block_length[k + 1]
count_vec = np.cumsum(nn_vec[:k + 1][::-1])[::-1]
# Evaluate fitness function for these possibilities
fit_vec = count_vec * (np.log(count_vec) - np.log(width))
fit_vec -= 4 # 4 comes from the prior on the number of changepoints
fit_vec[1:] += best[:k]
# Find the max of the fitness: this is the k^th changepoint
i_max = np.argmax(fit_vec)
last[k] = i_max
best[k] = fit_vec[i_max]
# Recover changepoints by iteratively peeling off the last block
change_points = np.zeros(n, dtype=int)
i_cp = n
ind = n
while True:
i_cp -= 1
change_points[i_cp] = ind
if ind == 0:
break
ind = last[ind - 1]
change_points = change_points[i_cp:]
return edges[change_points][1:-1]
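# ----------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# module). The toy data below are an assumption made purely to show how
# the binner is meant to be called: fit() learns one set of cut points
# per column, exposed through the `cut_points` property.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    # column 0: bimodal data, column 1: uniform data
    X_demo = np.column_stack([
        np.concatenate([rng.normal(-2.0, 0.5, 500), rng.normal(3.0, 1.0, 500)]),
        rng.uniform(0.0, 10.0, 1000),
    ])
    binner = BayesianBlocksBinner().fit(X_demo)
    for j, cuts in enumerate(binner.cut_points):
        print('column %d: %d adaptive cut points' % (j, len(cuts)))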
| mit |
bnaul/scikit-learn | examples/manifold/plot_compare_methods.py | 13 | 2823 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`sphx_glr_auto_examples_manifold_plot_manifold_sphere.py`
Note that the purpose of the MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space; unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <[email protected]>
print(__doc__)
from collections import OrderedDict
from functools import partial
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
# Create figure
fig = plt.figure(figsize=(15, 8))
fig.suptitle("Manifold Learning with %i points, %i neighbors"
             % (n_points, n_neighbors), fontsize=14)
# Add 3d scatter plot
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
# Set-up manifold methods
LLE = partial(manifold.LocallyLinearEmbedding,
n_neighbors, n_components, eigen_solver='auto')
methods = OrderedDict()
methods['LLE'] = LLE(method='standard')
methods['LTSA'] = LLE(method='ltsa')
methods['Hessian LLE'] = LLE(method='hessian')
methods['Modified LLE'] = LLE(method='modified')
methods['Isomap'] = manifold.Isomap(n_neighbors, n_components)
methods['MDS'] = manifold.MDS(n_components, max_iter=100, n_init=1)
methods['SE'] = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
methods['t-SNE'] = manifold.TSNE(n_components=n_components, init='pca',
random_state=0)
# Plot results
for i, (label, method) in enumerate(methods.items()):
t0 = time()
Y = method.fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (label, t1 - t0))
ax = fig.add_subplot(2, 5, 2 + i + (i > 3))
ax.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
ax.set_title("%s (%.2g sec)" % (label, t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
ax.axis('tight')
plt.show()
| bsd-3-clause |
luca-penasa/mtspec-python3 | doc/conf.py | 1 | 7756 | # -*- coding: utf-8 -*-
#
# mtspec documentation build configuration file, created by
# sphinx-quickstart on Sat Dec 26 11:55:06 2009.
# :note: major modification of original version at the end
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__),
# os.path.pardir)))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'matplotlib.sphinxext.plot_directive',
'matplotlib.sphinxext.only_directives']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'mtspec'
copyright = '2009-2010, Moritz Beyreuther, Lion Krischer'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = '0.2'
# The full version, including alpha/beta/rc tags.
release = '0.2'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = "%B %d %H o'clock, %Y"
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
#exclude_dirs = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "mtspec documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'mtspecdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'mtspec.tex', 'mtspec Documentation',
'Moritz Beyreuther, Lion Krischer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Redefine how to process inherited methods/members
def process_inherited(app, what, name, obj, options, docstringlines):
"""
If we're including inherited members, omit their docstrings.
"""
if not options.get('inherited-members'):
return
if what in ['class', 'data', 'exception', 'function', 'module']:
return
name = name.split('.')[-1]
if what == 'method' and hasattr(obj, 'im_class'):
if name in obj.im_class.__dict__.keys():
return
if what == 'attribute' and hasattr(obj, '__objclass__'):
if name in obj.__objclass__.__dict__.keys():
return
for i in xrange(len(docstringlines)):
docstringlines.pop()
# Options for Including private Members/Methods
#----------------------------------------------
# For reference, see
# * http://bitbucket.org/birkenfeld/sphinx/src/tip/tests/test_autodoc.py
# * http://hg.sagemath.org/sage-main/file/21efb0b3fc47/doc/common/conf.py#l1
# which is the sagemath conf.py
# * http://trac.sagemath.org/sage_trac/attachment/ticket/7549/\
# trac_7549-doc_inheritance_underscore_v3.patch
# Do not skip private members
def skip_underscore(app, what, name, obj, skip, options):
"""
Conditionally include docstrings for objects whose names begin
with one underscore ('_').
"""
name = name.split('.')[-1]
if name.startswith('_') and not name.startswith('__'):
return False
return skip
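# Illustrative behaviour of the handler above (a sketch, not exhaustive): a
# member named '_helper' is documented because we return False (i.e. "do not
# skip"), while a dunder such as '__init__' starts with two underscores and
# falls through to ``return skip``, keeping Sphinx's default behaviour.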
from sphinx.ext.autodoc import cut_lines
# Attach this to the builder
def setup(app):
#app.connect('autodoc-process-docstring', cut_lines(2))
app.connect('autodoc-process-docstring', process_inherited)
app.connect('autodoc-skip-member', skip_underscore)
| gpl-2.0 |
contigiani/hvs | examples/RossiContigianicomparison.py | 1 | 1033 | '''
Compares the two sampling methods, one using an analytic form, one using a Monte Carlo method.
'''
import numpy as np
from hvs import HVSsample, Rossi2017, Contigiani2018
ejectionmodel1 = Rossi2017()
ejectionmodel2 = Contigiani2018()
# generate...
sample1 = HVSsample(ejectionmodel1, name='Rossi MC binaries', n=1e7, verbose=True)
sample1.save('ejcatalog_mc.fits')
sample2 = HVSsample(ejectionmodel2, name='Contigiani powerlaw fit', n=1e5, verbose=True)
sample2.save('ejcatalog_fit.fits')
# ...or load
sample1 = HVSsample('ejcatalog_mc.fits')
sample2 = HVSsample('ejcatalog_fit.fits')
# Plot the mass distributions, for comparison
from matplotlib import pyplot as plt
plt.hist(sample1.m, bins=np.logspace(np.log10(0.5), np.log10(9), 20), histtype='step', label='Rossi+ 2017', normed=1)
plt.hist(sample2.m, bins=np.logspace(np.log10(0.5), np.log10(9), 20), histtype='step', label='Contigiani+ 2018', normed=1)
plt.gca().set_xscale('log')
plt.gca().set_yscale('log')
plt.gca().set_xlabel('M / Msun')
plt.legend()
plt.show()
| gpl-3.0 |
Cadair/texfigure | texfigure/setup_mpl.py | 1 | 4583 | # -*- coding: utf-8 -*-
"""
Created on Fri May 29 17:03:59 2015
@author: Stuart Mumford
This uses a fair bit of the magic from here:
http://bkanuka.com/articles/native-latex-plots/
"""
import numpy as np
import matplotlib
matplotlib.use('pgf')
def figsize(pytex, scale=None, height_ratio=None, figure_width_context="figurewidth"):
r"""
A helper for calculating figure sizes based upon latex page widths.
This uses the ``pythontexcontext`` to access the figurewith variable from
LaTeX, this function then returns a matplotlib ``figwidth`` tuple based on
the scale and height_ratio parameters.
Parameters
----------
    pytex : PythonTeX Utilities class
The PythonTeX helper class instance from the LaTeX document.
scale : float
The scale of the figure width in comparison to the textwidth, i.e.
1 = 100%.
    height_ratio : float
        The ratio of the height to the width.
        Default is the golden ratio (~0.618); 1.0 would give a square figure.
figure_width_context : `string`
The pytex context variable to use to generate the figure width.
Returns
-------
fig_size : tuple
The ``(width, height)`` tuple of the figure size in inches for mpl.
Examples
--------
.. code-block:: latex
% Give PythonTeX the Textwidth
\setpythontexcontext{figurewidth=\the\textwidth}
\begin{document}
\begin{pycode}
plt.figure(figsize=figsize(pytex))
...
\end{pycode}
"""
if hasattr(pytex, 'context'):
textwidth_pt = float(pytex.context.get(figure_width_context, 'pt')[:-2])
if not scale:
scale = float(pytex.context.get('figscale', 0.95))
else:
textwidth_pt = None
if not scale:
scale = 0.95
if not textwidth_pt:
raise AttributeError(r"pytex.context has no attribute figurewidth, please "
r"execute the following command in the preamble: "
r"\setpythontexcontext{figurewidth=\the\textwidth}")
textwidth_in = pytex.pt_to_in(textwidth_pt)
if not height_ratio:
height_ratio = (np.sqrt(5.0)-1.0)/2.0
    fig_width = scale*textwidth_in  # width as a fraction of the LaTeX text width
return (fig_width, fig_width*height_ratio)
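# Minimal usage sketch (assumes a PythonTeX session supplies ``pytex`` and the
# LaTeX preamble sets the ``figurewidth`` context as shown in the docstring):
#
#     import matplotlib.pyplot as plt
#     fig = plt.figure(figsize=figsize(pytex, scale=0.9, height_ratio=0.5))
#     plt.plot([0, 1], [0, 1])
#     fig.savefig('example.pgf')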
def configure_latex_plots(pytex, font_size=12, **kwargs):
"""
Configure a sane set of latex defaults for pgf figure generation.
Parameters
----------
    pytex : PythonTeX Utilities class.
The pytex class from the PythonTeX Session
font_size : `float`
The default font size for the following rcParams:
| font.size
| axes.labelsize
| xtick.labelsize
| ytick.labelsize
| legend.fontsize
kwargs : `dict`
Extra keyword arguments are used to update `matplotlib.rcParams`.
"""
pgf_with_latex = {
"pgf.texsystem": "pdflatex",
"text.usetex": True,
"font.size": font_size,
"font.family": "serif",
"font.serif": [], # blank entries should cause plots to inherit fonts from the document
"font.sans-serif": [],
"font.monospace": [],
"axes.labelsize": font_size, # LaTeX default is 10pt font.
"legend.fontsize":font_size,
"xtick.labelsize": font_size,
"ytick.labelsize": font_size,
"figure.figsize": figsize(pytex),
"pgf.preamble": [
r"\usepackage[utf8x]{inputenc}", # use utf8 fonts becasue your computer can handle it :)
r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble
]
}
pgf_with_latex.update(kwargs)
matplotlib.rcParams.update(pgf_with_latex)
def preamble_setup():
preamble = """
% pytexbug fix for context in customcode.
\makeatletter
\renewenvironment{pythontexcustomcode}[2][begin]{%
\VerbatimEnvironment
\Depythontex{env:pythontexcustomcode:om:n}%
\ifstrequal{#1}{begin}{}{%
\ifstrequal{#1}{end}{}{\PackageError{\pytx@packagename}%
{Invalid optional argument for pythontexcustomcode}{}
}%
}%
\xdef\pytx@type{CC:#2:#1}%
\edef\pytx@cmd{code}%
% PATCH \def\pytx@context{}%
\pytx@SetContext
% END PATCH
\def\pytx@group{none}%
\pytx@BeginCodeEnv[none]}%
{\end{VerbatimOut}%
\setcounter{FancyVerbLine}{\value{pytx@FancyVerbLineTemp}}%
\stepcounter{\pytx@counter}%
}%
\makeatother
\setpythontexcontext{textwidth=\the\textwidth}
"""
return preamble
| bsd-3-clause |
adityaarun1/faster_rcnn_pytorch | faster_rcnn/roi_data_layer/minibatch.py | 2 | 8723 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Compute minibatch blobs for training a Fast R-CNN network."""
import numpy as np
import numpy.random as npr
import cv2
import os
# TODO: make fast_rcnn irrelevant
# >>>> obsolete, because it depends on sth outside of this project
from ..fast_rcnn.config import cfg
# <<<< obsolete
from ..utils.blob import prep_im_for_blob, im_list_to_blob
def get_minibatch(roidb, num_classes):
"""Given a roidb, construct a minibatch sampled from it."""
num_images = len(roidb)
# Sample random scales to use for each image in this batch
random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
size=num_images)
assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
'num_images ({}) must divide BATCH_SIZE ({})'. \
format(num_images, cfg.TRAIN.BATCH_SIZE)
rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images
fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)
# Get the input image blob, formatted for caffe
im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)
blobs = {'data': im_blob}
if cfg.TRAIN.HAS_RPN:
assert len(im_scales) == 1, "Single batch only"
assert len(roidb) == 1, "Single batch only"
# gt boxes: (x1, y1, x2, y2, cls)
gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :] * im_scales[0]
gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
blobs['gt_boxes'] = gt_boxes
blobs['gt_ishard'] = roidb[0]['gt_ishard'][gt_inds] \
if 'gt_ishard' in roidb[0] else np.zeros(gt_inds.size, dtype=int)
# blobs['gt_ishard'] = roidb[0]['gt_ishard'][gt_inds]
blobs['dontcare_areas'] = roidb[0]['dontcare_areas'] * im_scales[0] \
if 'dontcare_areas' in roidb[0] else np.zeros([0, 4], dtype=float)
blobs['im_info'] = np.array(
[[im_blob.shape[1], im_blob.shape[2], im_scales[0]]],
dtype=np.float32)
blobs['im_name'] = os.path.basename(roidb[0]['image'])
else: # not using RPN
# Now, build the region of interest and label blobs
rois_blob = np.zeros((0, 5), dtype=np.float32)
labels_blob = np.zeros((0), dtype=np.float32)
bbox_targets_blob = np.zeros((0, 4 * num_classes), dtype=np.float32)
bbox_inside_blob = np.zeros(bbox_targets_blob.shape, dtype=np.float32)
# all_overlaps = []
for im_i in range(num_images):
labels, overlaps, im_rois, bbox_targets, bbox_inside_weights \
= _sample_rois(roidb[im_i], fg_rois_per_image, rois_per_image,
num_classes)
# Add to RoIs blob
rois = _project_im_rois(im_rois, im_scales[im_i])
batch_ind = im_i * np.ones((rois.shape[0], 1))
rois_blob_this_image = np.hstack((batch_ind, rois))
rois_blob = np.vstack((rois_blob, rois_blob_this_image))
# Add to labels, bbox targets, and bbox loss blobs
labels_blob = np.hstack((labels_blob, labels))
bbox_targets_blob = np.vstack((bbox_targets_blob, bbox_targets))
bbox_inside_blob = np.vstack((bbox_inside_blob, bbox_inside_weights))
# all_overlaps = np.hstack((all_overlaps, overlaps))
# For debug visualizations
# _vis_minibatch(im_blob, rois_blob, labels_blob, all_overlaps)
blobs['rois'] = rois_blob
blobs['labels'] = labels_blob
if cfg.TRAIN.BBOX_REG:
blobs['bbox_targets'] = bbox_targets_blob
blobs['bbox_inside_weights'] = bbox_inside_blob
blobs['bbox_outside_weights'] = \
np.array(bbox_inside_blob > 0).astype(np.float32)
return blobs
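# Illustrative note on the RPN branch above (exact values depend on cfg): with
# HAS_RPN enabled and a single image per minibatch, ``blobs`` holds 'data'
# (the image blob; ``im_info`` reads height/width from shape[1:3]), 'gt_boxes'
# of shape (num_gt, 5) already scaled to the resized image, 'gt_ishard',
# 'dontcare_areas', 'im_info' = [[height, width, scale]] and 'im_name'.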
def _sample_rois(roidb, fg_rois_per_image, rois_per_image, num_classes):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
# label = class RoI has max overlap with
labels = roidb['max_classes']
overlaps = roidb['max_overlaps']
rois = roidb['boxes']
# Select foreground RoIs as those with >= FG_THRESH overlap
fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
# Guard against the case when an image has fewer than fg_rois_per_image
# foreground RoIs
fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_inds.size)
# Sample foreground regions without replacement
if fg_inds.size > 0:
fg_inds = npr.choice(
fg_inds, size=fg_rois_per_this_image, replace=False)
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &
(overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
# Compute number of background RoIs to take from this image (guarding
# against there being fewer than desired)
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = np.minimum(bg_rois_per_this_image,
bg_inds.size)
    # Sample background regions without replacement
if bg_inds.size > 0:
bg_inds = npr.choice(
bg_inds, size=bg_rois_per_this_image, replace=False)
# The indices that we're selecting (both fg and bg)
keep_inds = np.append(fg_inds, bg_inds)
# Select sampled values from various arrays:
labels = labels[keep_inds]
# Clamp labels for the background RoIs to 0
labels[fg_rois_per_this_image:] = 0
overlaps = overlaps[keep_inds]
rois = rois[keep_inds]
bbox_targets, bbox_inside_weights = _get_bbox_regression_labels(
roidb['bbox_targets'][keep_inds, :], num_classes)
return labels, overlaps, rois, bbox_targets, bbox_inside_weights
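# Worked example (hypothetical cfg values): with TRAIN.BATCH_SIZE = 128,
# FG_FRACTION = 0.25 and 2 images, get_minibatch passes rois_per_image = 64
# and fg_rois_per_image = 16, so each image contributes at most 16 foreground
# RoIs (overlap >= FG_THRESH) and the remaining slots are filled from the
# [BG_THRESH_LO, BG_THRESH_HI) background pool, sampled without replacement.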
def _get_image_blob(roidb, scale_inds):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
processed_ims = []
im_scales = []
for i in range(num_images):
im = cv2.imread(roidb[i]['image'])
if roidb[i]['flipped']:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, im_scales
def _project_im_rois(im_rois, im_scale_factor):
"""Project image RoIs into the rescaled training image."""
rois = im_rois * im_scale_factor
return rois
def _get_bbox_regression_labels(bbox_target_data, num_classes):
"""Bounding-box regression targets are stored in a compact form in the
roidb.
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets). The loss weights
are similarly expanded.
Returns:
        bbox_targets (ndarray): N x 4K blob of regression targets
bbox_inside_weights (ndarray): N x 4K blob of loss weights
"""
clss = bbox_target_data[:, 0]
bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
inds = np.where(clss > 0)[0]
for ind in inds:
cls = clss[ind]
start = 4 * cls
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS
return bbox_targets, bbox_inside_weights
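# Tiny illustration (hypothetical numbers): with num_classes = 3 and a row of
# class 2 with targets (tx, ty, tw, th), the expanded row is
# [0, 0, 0, 0, 0, 0, 0, 0, tx, ty, tw, th]; only columns 8:12 of
# bbox_inside_weights are set to cfg.TRAIN.BBOX_INSIDE_WEIGHTS.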
def _vis_minibatch(im_blob, rois_blob, labels_blob, overlaps):
"""Visualize a mini-batch for debugging."""
import matplotlib.pyplot as plt
for i in range(rois_blob.shape[0]):
rois = rois_blob[i, :]
im_ind = rois[0]
roi = rois[1:]
im = im_blob[im_ind, :, :, :].transpose((1, 2, 0)).copy()
im += cfg.PIXEL_MEANS
im = im[:, :, (2, 1, 0)]
im = im.astype(np.uint8)
cls = labels_blob[i]
plt.imshow(im)
print('class: ', cls, ' overlap: ', overlaps[i])
plt.gca().add_patch(
plt.Rectangle((roi[0], roi[1]), roi[2] - roi[0],
roi[3] - roi[1], fill=False,
edgecolor='r', linewidth=3)
)
plt.show()
| mit |
synthicity/urbansim | urbansim/models/tests/test_dcm.py | 3 | 22138 | import numpy as np
import numpy.testing as npt
import pandas as pd
import pytest
import os
import tempfile
import yaml
from pandas.util import testing as pdt
from ...utils import testing
from .. import dcm
@pytest.fixture
def seed(request):
current = np.random.get_state()
def fin():
np.random.set_state(current)
request.addfinalizer(fin)
np.random.seed(0)
@pytest.fixture
def choosers():
return pd.DataFrame(
{'var1': range(5, 10),
'thing_id': ['a', 'c', 'e', 'g', 'i']})
@pytest.fixture
def grouped_choosers(choosers):
choosers['group'] = ['x', 'y', 'x', 'x', 'y']
return choosers
@pytest.fixture
def alternatives():
return pd.DataFrame(
{'var2': range(10, 20),
'var3': range(20, 30)},
index=pd.Index([x for x in 'abcdefghij'], name='thing_id'))
@pytest.fixture
def basic_dcm():
model_exp = 'var2 + var1:var3'
sample_size = 5
probability_mode = 'full_product'
choice_mode = 'individual'
choosers_fit_filters = ['var1 != 5']
choosers_predict_filters = ['var1 != 7']
alts_fit_filters = ['var3 != 15']
alts_predict_filters = ['var2 != 14']
interaction_predict_filters = None
estimation_sample_size = None
prediction_sample_size = None
choice_column = None
name = 'Test LCM'
model = dcm.MNLDiscreteChoiceModel(
model_exp, sample_size,
probability_mode, choice_mode,
choosers_fit_filters, choosers_predict_filters,
alts_fit_filters, alts_predict_filters,
interaction_predict_filters, estimation_sample_size,
prediction_sample_size, choice_column, name)
return model
@pytest.fixture
def basic_dcm_fit(basic_dcm, choosers, alternatives):
basic_dcm.fit(choosers, alternatives, choosers.thing_id)
return basic_dcm
def test_unit_choice_uniform(choosers, alternatives):
probabilities = [1] * len(alternatives)
choices = dcm.unit_choice(
choosers.index, alternatives.index, probabilities)
npt.assert_array_equal(choices.index, choosers.index)
assert choices.isin(alternatives.index).all()
def test_unit_choice_some_zero(choosers, alternatives):
probabilities = [0, 1, 0, 1, 1, 0, 1, 0, 0, 1]
choices = dcm.unit_choice(
choosers.index, alternatives.index, probabilities)
npt.assert_array_equal(choices.index, choosers.index)
npt.assert_array_equal(sorted(choices.values), ['b', 'd', 'e', 'g', 'j'])
def test_unit_choice_not_enough(choosers, alternatives):
probabilities = [0, 0, 0, 0, 0, 1, 0, 1, 0, 0]
choices = dcm.unit_choice(
choosers.index, alternatives.index, probabilities)
npt.assert_array_equal(choices.index, choosers.index)
assert choices.isnull().sum() == 3
npt.assert_array_equal(sorted(choices[~choices.isnull()]), ['f', 'h'])
def test_unit_choice_none_available(choosers, alternatives):
probabilities = [0] * len(alternatives)
choices = dcm.unit_choice(
choosers.index, alternatives.index, probabilities)
npt.assert_array_equal(choices.index, choosers.index)
assert choices.isnull().all()
def test_mnl_dcm_prob_choice_mode_compat(basic_dcm):
with pytest.raises(ValueError):
dcm.MNLDiscreteChoiceModel(
basic_dcm.model_expression, basic_dcm.sample_size,
probability_mode='single_chooser', choice_mode='individual')
with pytest.raises(ValueError):
dcm.MNLDiscreteChoiceModel(
basic_dcm.model_expression, basic_dcm.sample_size,
probability_mode='full_product', choice_mode='aggregate')
def test_mnl_dcm_prob_mode_interaction_compat(basic_dcm):
with pytest.raises(ValueError):
dcm.MNLDiscreteChoiceModel(
basic_dcm.model_expression, basic_dcm.sample_size,
probability_mode='full_product', choice_mode='individual',
interaction_predict_filters=['var1 > 9000'])
def test_mnl_dcm(seed, basic_dcm, choosers, alternatives):
assert basic_dcm.choosers_columns_used() == ['var1']
assert set(basic_dcm.alts_columns_used()) == {'var2', 'var3'}
assert set(basic_dcm.interaction_columns_used()) == \
{'var1', 'var2', 'var3'}
assert set(basic_dcm.columns_used()) == {'var1', 'var2', 'var3'}
loglik = basic_dcm.fit(choosers, alternatives, choosers.thing_id)
basic_dcm.report_fit()
# hard to test things exactly because there's some randomness
# involved, but can at least do a smoke test.
assert len(loglik) == 3
assert len(basic_dcm.fit_parameters) == 2
assert len(basic_dcm.fit_parameters.columns) == 3
filtered_choosers, filtered_alts = basic_dcm.apply_predict_filters(
choosers, alternatives)
probs = basic_dcm.probabilities(choosers, alternatives)
assert len(probs) == len(filtered_choosers) * len(filtered_alts)
sprobs = basic_dcm.summed_probabilities(choosers, alternatives)
assert len(sprobs) == len(filtered_alts)
pdt.assert_index_equal(
sprobs.index, filtered_alts.index, check_names=False)
npt.assert_allclose(sprobs.sum(), len(filtered_choosers))
choices = basic_dcm.predict(choosers.iloc[1:], alternatives)
pdt.assert_series_equal(
choices,
pd.Series(
['h', 'c', 'f'], index=pd.Index([1, 3, 4], name='chooser_id')))
# check that we can do a YAML round-trip
yaml_str = basic_dcm.to_yaml()
new_model = dcm.MNLDiscreteChoiceModel.from_yaml(yaml_str)
assert new_model.fitted
testing.assert_frames_equal(
basic_dcm.fit_parameters, new_model.fit_parameters)
def test_mnl_dcm_repeated_alts(basic_dcm, choosers, alternatives):
interaction_predict_filters = ['var1 * var2 > 50']
choice_column = 'thing_id'
basic_dcm.probability_mode = 'single_chooser'
basic_dcm.choice_mode = 'aggregate'
basic_dcm.interaction_predict_filters = interaction_predict_filters
basic_dcm.choice_column = choice_column
loglik = basic_dcm.fit(choosers, alternatives, 'thing_id')
basic_dcm.report_fit()
# hard to test things exactly because there's some randomness
# involved, but can at least do a smoke test.
assert len(loglik) == 3
assert len(basic_dcm.fit_parameters) == 2
assert len(basic_dcm.fit_parameters.columns) == 3
repeated_index = alternatives.index.repeat([1, 2, 3, 2, 4, 3, 2, 1, 5, 8])
repeated_alts = alternatives.loc[repeated_index].reset_index()
choices = basic_dcm.predict(choosers, repeated_alts)
pdt.assert_index_equal(choices.index, pd.Index([0, 1, 3, 4]))
assert choices.isin(repeated_alts.index).all()
def test_mnl_dcm_yaml(basic_dcm, choosers, alternatives):
expected_dict = {
'model_type': 'discretechoice',
'model_expression': basic_dcm.model_expression,
'sample_size': basic_dcm.sample_size,
'name': basic_dcm.name,
'probability_mode': basic_dcm.probability_mode,
'choice_mode': basic_dcm.choice_mode,
'choosers_fit_filters': basic_dcm.choosers_fit_filters,
'choosers_predict_filters': basic_dcm.choosers_predict_filters,
'alts_fit_filters': basic_dcm.alts_fit_filters,
'alts_predict_filters': basic_dcm.alts_predict_filters,
'interaction_predict_filters': basic_dcm.interaction_predict_filters,
'estimation_sample_size': basic_dcm.estimation_sample_size,
'prediction_sample_size': basic_dcm.prediction_sample_size,
'choice_column': basic_dcm.choice_column,
'fitted': False,
'log_likelihoods': None,
'fit_parameters': None
}
assert yaml.safe_load(basic_dcm.to_yaml()) == expected_dict
new_mod = dcm.MNLDiscreteChoiceModel.from_yaml(basic_dcm.to_yaml())
assert yaml.safe_load(new_mod.to_yaml()) == expected_dict
basic_dcm.fit(choosers, alternatives, 'thing_id')
expected_dict['fitted'] = True
del expected_dict['log_likelihoods']
del expected_dict['fit_parameters']
actual_dict = yaml.safe_load(basic_dcm.to_yaml())
assert isinstance(actual_dict.pop('log_likelihoods'), dict)
assert isinstance(actual_dict.pop('fit_parameters'), dict)
assert actual_dict == expected_dict
new_mod = dcm.MNLDiscreteChoiceModel.from_yaml(basic_dcm.to_yaml())
assert new_mod.fitted is True
def test_mnl_dcm_prob_mode_single(seed, basic_dcm_fit, choosers, alternatives):
basic_dcm_fit.probability_mode = 'single_chooser'
filtered_choosers, filtered_alts = basic_dcm_fit.apply_predict_filters(
choosers, alternatives)
probs = basic_dcm_fit.probabilities(choosers.iloc[1:], alternatives)
pdt.assert_series_equal(
probs,
pd.Series(
[0.25666709612190147,
0.20225620916965448,
0.15937989234214262,
0.1255929308043417,
0.077988133629030815,
0.061455420294827229,
0.04842747874412457,
0.038161332007195688,
0.030071506886781514],
index=pd.MultiIndex.from_product(
[[1], filtered_alts.index.values],
names=['chooser_id', 'alternative_id'])))
sprobs = basic_dcm_fit.summed_probabilities(choosers, alternatives)
pdt.assert_index_equal(
sprobs.index, filtered_alts.index, check_names=False)
npt.assert_allclose(sprobs.sum(), len(filtered_choosers))
def test_mnl_dcm_prob_mode_single_prediction_sample_size(
seed, basic_dcm_fit, choosers, alternatives):
basic_dcm_fit.probability_mode = 'single_chooser'
basic_dcm_fit.prediction_sample_size = 5
filtered_choosers, filtered_alts = basic_dcm_fit.apply_predict_filters(
choosers, alternatives)
probs = basic_dcm_fit.probabilities(choosers.iloc[1:], alternatives)
pdt.assert_series_equal(
probs,
pd.Series(
[0.11137766,
0.05449957,
0.14134044,
0.22761617,
0.46516616],
index=pd.MultiIndex.from_product(
[[1], ['g', 'j', 'f', 'd', 'a']],
names=['chooser_id', 'alternative_id'])))
sprobs = basic_dcm_fit.summed_probabilities(choosers, alternatives)
pdt.assert_index_equal(
sprobs.index,
pd.Index(['d', 'g', 'a', 'c', 'd'], name='alternative_id'))
npt.assert_allclose(sprobs.sum(), len(filtered_choosers))
def test_mnl_dcm_prob_mode_full_prediction_sample_size(
seed, basic_dcm_fit, choosers, alternatives):
basic_dcm_fit.probability_mode = 'full_product'
basic_dcm_fit.prediction_sample_size = 5
filtered_choosers, filtered_alts = basic_dcm_fit.apply_predict_filters(
choosers, alternatives)
probs = basic_dcm_fit.probabilities(choosers.iloc[1:], alternatives)
assert len(probs) == (len(filtered_choosers) - 1) * 5
npt.assert_allclose(probs.sum(), len(filtered_choosers) - 1)
sprobs = basic_dcm_fit.summed_probabilities(choosers, alternatives)
pdt.assert_index_equal(
sprobs.index, filtered_alts.index, check_names=False)
npt.assert_allclose(sprobs.sum(), len(filtered_choosers))
def test_mnl_dcm_choice_mode_agg(seed, basic_dcm_fit, choosers, alternatives):
basic_dcm_fit.probability_mode = 'single_chooser'
basic_dcm_fit.choice_mode = 'aggregate'
filtered_choosers, filtered_alts = basic_dcm_fit.apply_predict_filters(
choosers, alternatives)
choices = basic_dcm_fit.predict(choosers, alternatives)
pdt.assert_series_equal(
choices,
pd.Series(['f', 'a', 'd', 'c'], index=[0, 1, 3, 4]))
def test_mnl_dcm_group(seed, grouped_choosers, alternatives):
model_exp = 'var2 + var1:var3'
sample_size = 4
choosers_predict_filters = ['var1 != 7']
alts_predict_filters = ['var2 != 14']
group = dcm.MNLDiscreteChoiceModelGroup('group')
group.add_model_from_params(
'x', model_exp, sample_size,
choosers_predict_filters=choosers_predict_filters)
group.add_model_from_params(
'y', model_exp, sample_size, alts_predict_filters=alts_predict_filters)
assert group.choosers_columns_used() == ['var1']
assert group.alts_columns_used() == ['var2']
assert set(group.interaction_columns_used()) == {'var1', 'var2', 'var3'}
assert set(group.columns_used()) == {'var1', 'var2', 'var3'}
assert group.fitted is False
logliks = group.fit(grouped_choosers, alternatives, 'thing_id')
assert group.fitted is True
assert 'x' in logliks and 'y' in logliks
assert isinstance(logliks['x'], dict) and isinstance(logliks['y'], dict)
probs = group.probabilities(grouped_choosers, alternatives)
for name, df in grouped_choosers.groupby('group'):
assert name in probs
filtered_choosers, filtered_alts = \
group.models[name].apply_predict_filters(df, alternatives)
assert len(probs[name]) == len(filtered_choosers) * len(filtered_alts)
filtered_choosers, filtered_alts = group.apply_predict_filters(
grouped_choosers, alternatives)
sprobs = group.summed_probabilities(grouped_choosers, alternatives)
assert len(sprobs) == len(filtered_alts)
pdt.assert_index_equal(
sprobs.index, filtered_alts.index, check_names=False)
choice_state = np.random.get_state()
choices = group.predict(grouped_choosers, alternatives)
pdt.assert_series_equal(
choices,
pd.Series(
['c', 'a', 'a', 'g'],
index=pd.Index([0, 3, 1, 4], name='chooser_id')))
# check that we don't get the same alt twice if they are removed
# make sure we're starting from the same random state as the last draw
np.random.set_state(choice_state)
group.remove_alts = True
choices = group.predict(grouped_choosers, alternatives)
pdt.assert_series_equal(
choices,
pd.Series(
['c', 'a', 'b', 'g'],
index=pd.Index([0, 3, 1, 4], name='chooser_id')))
def test_mnl_dcm_segmented_raises():
group = dcm.SegmentedMNLDiscreteChoiceModel('group', 2)
with pytest.raises(ValueError):
group.add_segment('x')
def test_mnl_dcm_segmented_prob_choice_mode_compat():
with pytest.raises(ValueError):
dcm.SegmentedMNLDiscreteChoiceModel(
'group', 10,
probability_mode='single_chooser', choice_mode='individual')
with pytest.raises(ValueError):
dcm.SegmentedMNLDiscreteChoiceModel(
'group', 10,
probability_mode='full_product', choice_mode='aggregate')
def test_mnl_dcm_segmented_prob_mode_interaction_compat():
with pytest.raises(ValueError):
dcm.SegmentedMNLDiscreteChoiceModel(
'group', 10,
probability_mode='full_product', choice_mode='individual',
interaction_predict_filters=['var1 > 9000'])
def test_mnl_dcm_segmented(seed, grouped_choosers, alternatives):
model_exp = 'var2 + var1:var3'
sample_size = 4
group = dcm.SegmentedMNLDiscreteChoiceModel(
'group', sample_size, default_model_expr=model_exp)
group.add_segment('x')
group.add_segment('y', 'var3 + var1:var2')
assert group.choosers_columns_used() == []
assert group.alts_columns_used() == []
assert set(group.interaction_columns_used()) == {'var1', 'var2', 'var3'}
assert set(group.columns_used()) == {'group', 'var1', 'var2', 'var3'}
assert group.fitted is False
logliks = group.fit(grouped_choosers, alternatives, 'thing_id')
assert group.fitted is True
assert 'x' in logliks and 'y' in logliks
assert isinstance(logliks['x'], dict) and isinstance(logliks['y'], dict)
probs = group.probabilities(grouped_choosers, alternatives)
for name, df in grouped_choosers.groupby('group'):
assert name in probs
assert len(probs[name]) == len(df) * len(alternatives)
sprobs = group.summed_probabilities(grouped_choosers, alternatives)
assert len(sprobs) == len(alternatives)
pdt.assert_index_equal(
sprobs.index, alternatives.index, check_names=False)
choice_state = np.random.get_state()
choices = group.predict(grouped_choosers, alternatives)
pdt.assert_series_equal(
choices,
pd.Series(
['c', 'a', 'b', 'a', 'j'],
index=pd.Index([0, 2, 3, 1, 4], name='chooser_id')))
# check that we don't get the same alt twice if they are removed
# make sure we're starting from the same random state as the last draw
np.random.set_state(choice_state)
group._group.remove_alts = True
choices = group.predict(grouped_choosers, alternatives)
pdt.assert_series_equal(
choices,
pd.Series(
['c', 'a', 'b', 'd', 'j'],
index=pd.Index([0, 2, 3, 1, 4], name='chooser_id')))
def test_mnl_dcm_segmented_yaml(grouped_choosers, alternatives):
model_exp = 'var2 + var1:var3'
sample_size = 4
group = dcm.SegmentedMNLDiscreteChoiceModel(
'group', sample_size, default_model_expr=model_exp, name='test_seg',
probability_mode='single_chooser', choice_mode='aggregate',
estimation_sample_size=20, prediction_sample_size=30)
group.add_segment('x')
group.add_segment('y', 'var3 + var1:var2')
expected_dict = {
'model_type': 'segmented_discretechoice',
'name': 'test_seg',
'segmentation_col': 'group',
'sample_size': sample_size,
'probability_mode': 'single_chooser',
'choice_mode': 'aggregate',
'choosers_fit_filters': None,
'choosers_predict_filters': None,
'alts_fit_filters': None,
'alts_predict_filters': None,
'interaction_predict_filters': None,
'estimation_sample_size': 20,
'prediction_sample_size': 30,
'choice_column': None,
'default_config': {
'model_expression': model_exp,
},
'remove_alts': False,
'fitted': False,
'models': {
'x': {
'name': 'x',
'fitted': False,
'log_likelihoods': None,
'fit_parameters': None
},
'y': {
'name': 'y',
'model_expression': 'var3 + var1:var2',
'fitted': False,
'log_likelihoods': None,
'fit_parameters': None
}
}
}
assert yaml.safe_load(group.to_yaml()) == expected_dict
new_seg = dcm.SegmentedMNLDiscreteChoiceModel.from_yaml(group.to_yaml())
assert yaml.safe_load(new_seg.to_yaml()) == expected_dict
group.fit(grouped_choosers, alternatives, 'thing_id')
expected_dict['fitted'] = True
expected_dict['models']['x']['fitted'] = True
expected_dict['models']['y']['fitted'] = True
del expected_dict['models']['x']['fit_parameters']
del expected_dict['models']['x']['log_likelihoods']
del expected_dict['models']['y']['fit_parameters']
del expected_dict['models']['y']['log_likelihoods']
actual_dict = yaml.safe_load(group.to_yaml())
assert isinstance(actual_dict['models']['x'].pop('fit_parameters'), dict)
assert isinstance(actual_dict['models']['x'].pop('log_likelihoods'), dict)
assert isinstance(actual_dict['models']['y'].pop('fit_parameters'), dict)
assert isinstance(actual_dict['models']['y'].pop('log_likelihoods'), dict)
assert actual_dict == expected_dict
new_seg = dcm.SegmentedMNLDiscreteChoiceModel.from_yaml(group.to_yaml())
assert new_seg.fitted is True
# check that the segmented model's probability mode and choice mode
    # are propagated to individual segments' models
assert (
new_seg._group.models['x'].probability_mode ==
expected_dict['probability_mode'])
assert (
new_seg._group.models['y'].choice_mode ==
expected_dict['choice_mode'])
assert (
new_seg._group.models['x'].estimation_sample_size ==
expected_dict['estimation_sample_size'])
assert (
new_seg._group.models['y'].prediction_sample_size ==
expected_dict['prediction_sample_size'])
def test_segmented_dcm_removes_old_models(grouped_choosers, alternatives):
model_exp = 'var2 + var1:var3'
sample_size = 4
group = dcm.SegmentedMNLDiscreteChoiceModel(
'group', sample_size, default_model_expr=model_exp)
group.add_segment('a')
group.add_segment('b')
group.add_segment('c')
group.fit(grouped_choosers, alternatives, 'thing_id')
assert sorted(group._group.models.keys()) == ['x', 'y']
def test_fit_from_cfg(basic_dcm, choosers, alternatives):
cfgname = tempfile.NamedTemporaryFile(suffix='.yaml').name
basic_dcm.to_yaml(cfgname)
dcm.MNLDiscreteChoiceModel.fit_from_cfg(
choosers, "thing_id", alternatives, cfgname)
dcm.MNLDiscreteChoiceModel.predict_from_cfg(
choosers, alternatives, cfgname)
dcm.MNLDiscreteChoiceModel.predict_from_cfg(choosers, alternatives,
cfgname, .2)
os.remove(cfgname)
def test_fit_from_cfg_segmented(grouped_choosers, alternatives):
model_exp = 'var2 + var1:var3'
sample_size = 4
group = dcm.SegmentedMNLDiscreteChoiceModel(
'group', sample_size, default_model_expr=model_exp)
group.add_segment('x')
group.add_segment('y', 'var3 + var1:var2')
cfgname = tempfile.NamedTemporaryFile(suffix='.yaml').name
group.to_yaml(cfgname)
dcm.SegmentedMNLDiscreteChoiceModel.fit_from_cfg(grouped_choosers,
"thing_id",
alternatives,
cfgname)
dcm.SegmentedMNLDiscreteChoiceModel.predict_from_cfg(grouped_choosers,
alternatives,
cfgname)
dcm.SegmentedMNLDiscreteChoiceModel.predict_from_cfg(grouped_choosers,
alternatives,
cfgname,
.8)
os.remove(cfgname)
| bsd-3-clause |
chuckoy/debate_analytics | debate_analytics/debate_analytics.py | 1 | 1588 | import json
import pandas as pd
import matplotlib.pyplot as plt
from collections import Counter
class DebateAnalytics():
"""
Analyses the statuses sent to it by the StatusListener it registers itself
into
:status_listener: the StatusListener that it listens to (listenerInception)
"""
def __init__(self, status_listener):
status_listener.register_observer(self)
self.keywords = ['#MIRIAM2016', '#Duterte2016',
'#RoxasRobredo2016', '#OnlyBinay', '#POE2016']
self.candidates = ['Miriam', 'Duterte', 'Roxas', 'Binay', 'Poe']
self.candidates_hashtags = dict(zip(self.keywords, self.candidates))
self.tweet_counts = Counter(self.candidates)
for candidate in self.candidates:
self.tweet_counts[candidate] = 0
print "WE'RE ALL LOADED UP AND READY TO GO"
def count_tweets(self, tweets_data):
return len(tweets_data)
def notify_new_status(self, status):
# See which candidate/s the tweet is talking about
tweet = json.loads(status)
self.update_counts(tweet['text'])
def update_counts(self, content):
print content
for hashtag, candidate in self.candidates_hashtags.items():
if hashtag.lower() in content.lower():
self.tweet_counts[candidate] += 1
print "Hashtag was {}, added 1 to {}".format(hashtag, candidate)
for candidate, count in self.tweet_counts.items():
print "{} now has {} number of tweets.".format(candidate, count)
print "\n"
| mit |
Garrett-R/scikit-learn | examples/gaussian_process/gp_diabetes_dataset.py | 17 | 2021 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
========================================================================
Gaussian Processes regression: goodness-of-fit on the 'diabetes' dataset
========================================================================
This example consists of fitting a Gaussian Process model onto the diabetes
dataset.
The correlation parameters are determined by means of maximum likelihood
estimation (MLE). An anisotropic squared exponential correlation model with a
constant regression model are assumed. We also used a nugget = 1e-2 in order to
account for the (strong) noise in the targets.
We then compute a cross-validation estimate of the coefficient of
determination (R2) without re-performing MLE, using the set of correlation
parameters found on the whole dataset.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import cross_val_score, KFold
# Load the dataset from scikit's data sets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# Instantiate a GP model
gp = GaussianProcess(regr='constant', corr='absolute_exponential',
theta0=[1e-4] * 10, thetaL=[1e-12] * 10,
thetaU=[1e-2] * 10, nugget=1e-2, optimizer='Welch')
# Fit the GP model to the data performing maximum likelihood estimation
gp.fit(X, y)
# Deactivate maximum likelihood estimation for the cross-validation loop
gp.theta0 = gp.theta_ # Given correlation parameter = MLE
gp.thetaL, gp.thetaU = None, None # None bounds deactivate MLE
# Perform a cross-validation estimate of the coefficient of determination
# using the cross_validation module (increase n_jobs to use more CPUs)
K = 20 # folds
R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=1).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
% (K, R2))
| bsd-3-clause |
Haunter17/MIR_SU17 | exp3/exp3c_4/exp3c_4.py | 1 | 24222 | import numpy as np
import tensorflow as tf
import h5py
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
# Functions for initializing neural nets parameters
def init_weight_variable(shape, nameIn):
initial = tf.truncated_normal(shape, stddev=0.1, dtype=tf.float32)
return tf.Variable(initial, name=nameIn)
def init_bias_variable(shape, nameIn):
initial = tf.constant(0.1, shape=shape, dtype=tf.float32)
return tf.Variable(initial, name=nameIn)
def conv2d(x, W):
return tf.nn.conv2d(x, W, [1, 1, 1, 1], 'VALID')
def loadData(filepath):
'''
Load and return four variables from the file with path filepath
X_train: input data for training
y_train: labels for X_train
X_val: input data for validation
y_val: labels for X_val
'''
    print('==> Experiment 3c_4')
print('==> Loading data from {}'.format(filepath))
# benchmark
t_start = time.time()
# reading data
f = h5py.File(filepath)
X_train = np.array(f.get('trainingFeatures'))
y_train = np.array(f.get('trainingLabels'))
X_val = np.array(f.get('validationFeatures'))
y_val = np.array(f.get('validationLabels'))
t_end = time.time()
    print('--Time elapsed for loading data: {t:.2f} seconds'.format(
        t=t_end - t_start))
del f
print('-- Number of training samples: {}'.format(X_train.shape[0]))
print('-- Number of validation samples: {}'.format(X_val.shape[0]))
print('Shape of X_train: %s'%str(X_train.shape))
print('Shape of y_train: %s'%str(y_train.shape))
print('Shape of X_val: %s'%str(X_val.shape))
print('Shape of y_val: %s'%str(y_val.shape))
return [X_train, y_train, X_val, y_val]
#self, X_train, y_train, X_val, y_val, num_freq, filter_row, filter_col, k1, k2, learningRate, pooling_strategy):
# set up property that makes it only be set once
# we'll use this to avoid adding tensors to the graph multiple times
import functools
def lazy_property(function):
attribute = '_cache_' + function.__name__
@property
@functools.wraps(function)
def decorator(self):
if not hasattr(self, attribute):
setattr(self, attribute, function(self))
return getattr(self, attribute)
return decorator
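# Minimal sketch of the caching behaviour (illustrative only; not part of the
# experiment): the decorated method runs once, its result is stored on the
# instance under '_cache_<name>', and later accesses return the cached value,
# which is what keeps TF ops from being added to the graph more than once.
#
#     class Example(object):
#         @lazy_property
#         def answer(self):
#             print('built once')
#             return 42
#
#     e = Example()
#     e.answer   # prints 'built once', returns 42
#     e.answer   # returns the cached 42; nothing is rebuilt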
class Model:
def __init__(self, num_freq, X_train, y_train, X_val, y_val, filter_row, filter_col, k1, learningRate, debug):
'''
Initializer for the model
'''
# store the data
self.X_train, self.y_train, self.X_val, self.y_val = X_train, y_train, X_val, y_val
# store the parameters sent to init that define our model
self.num_freq, self.filter_row, self.filter_col, self.k1, self.learningRate, self.debug = num_freq, filter_row, filter_col, k1, learningRate, debug
# find num_training_vec, total_features, num_frames, num_classes, and l from the shape of the data
# and store them
self.storeParamsFromData()
# Set-up and store the input and output placeholders
x = tf.placeholder(tf.float32, [None, self.total_features])
y_ = tf.placeholder(tf.float32, [None, self.num_classes])
self.x = x
self.y_ = y_
# Setup and store tensor that performs the one-hot encoding
y_train_OHEnc = tf.one_hot(self.y_train.copy(), self.num_classes)
y_val_OHEnc = tf.one_hot(self.y_val.copy(), self.num_classes)
self.y_train_OHEnc = y_train_OHEnc
self.y_val_OHEnc = y_val_OHEnc
# create each lazy_property
# each lazy_property will add tensors to the graph
self.y_conv
self.cross_entropy
self.train_step
self.accuracy
# properties for use in debugging
if self.debug:
self.grads_and_vars
# print to the user that the network has been set up, along with its properties
print("Setting up Single Conv Layer Neural net with %g x %g filters, k1 = %g, learningRate = %g"%(filter_row, filter_col, k1, learningRate))
def storeParamsFromData(self):
'''
Calculate and store parameters from the raw data
        total_features: The number of CQT coefficients total (includes all context frames)
num_training_vec: The number of training examples in your dataset
num_frames: The number of context frames in each training example (total_features / num_freq)
num_classes: The number of songs we're distinguishing between in our output
l: The length of our second convolutional kernel - for now, its equal to num_frames
'''
# Neural-network model set-up
# calculating some values which will be nice as we set up the model
num_training_vec, total_features = self.X_train.shape
num_frames = int(total_features / self.num_freq)
print('-- Num frames: {}'.format(num_frames))
num_classes = int(max(self.y_train.max(), self.y_val.max()) + 1)
l = num_frames
# store what will be helpful later
self.total_features = total_features
self.num_training_vec = num_training_vec
self.num_frames = num_frames
self.num_classes = num_classes
self.l = l
@lazy_property
def y_conv(self):
# reshape the input into the form of a spectrograph
x_image = tf.reshape(self.x, [-1, self.num_freq, self.num_frames, 1])
x_image = tf.identity(x_image, name="x_image")
# first convolutional layer parameters
W_conv1 = init_weight_variable([self.filter_row, self.filter_col, 1, self.k1], "W_conv1")
b_conv1 = init_bias_variable([self.k1], "b_conv1")
# tensor that computes the output of the first convolutional layer
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_conv1 = tf.identity(h_conv1, name="h_conv_1")
# flatten out the output of the first convolutional layer to pass to the softmax layer
h_conv1_flat = tf.reshape(h_conv1, [-1, (self.num_freq - self.filter_row + 1) * (self.num_frames - self.filter_col + 1) * self.k1])
h_conv1_flat = tf.identity(h_conv1_flat, name="h_conv1_flat")
# softmax layer parameters
W_sm = init_weight_variable([(self.num_freq - self.filter_row + 1) * (self.num_frames - self.filter_col + 1) * self.k1, self.num_classes], "W_sm")
b_sm = init_bias_variable([self.num_classes], "b_sm")
# the output of the layer - un-normalized and without a non-linearity
# since cross_entropy_with_logits takes care of that
y_conv = tf.matmul(h_conv1_flat, W_sm) + b_sm
y_conv = tf.identity(y_conv, name="y_conv")
return y_conv # would want to softmax it to get an actual prediction
@lazy_property
def cross_entropy(self):
'''
Create a tensor that computes the cross entropy cost
Use the placeholder y_ as the labels, with input y_conv
Note that softmax_cross_entropy_with_logits takes care of normalizing
y_conv to make it a probability distribution
This tensor can be accessed using: self.cross_entropy
'''
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=self.y_, logits=self.y_conv))
cross_entropy = tf.identity(cross_entropy, name="cross_entropy")
return cross_entropy
@lazy_property
def optimizer(self):
'''
Create a tensor that represents the optimizer. This tensor can
be accessed using: self.optimizer
'''
optimizer = tf.train.AdamOptimizer(learning_rate = self.learningRate)
return optimizer
@lazy_property
def train_step(self):
'''
Creates a tensor that represents a single training step. This tensor
can be passed a feed_dict that has x and y_, and it will compute the gradients
and perform a single step.
This tensor can be accessed using: self.train_step
'''
return self.optimizer.minimize(self.cross_entropy)
@lazy_property
def accuracy(self):
'''
Create a tensor that computes the accuracy, using the placeholder y_ as the labeled data
and y_conv for the predictions of the network.
This tensor can be accessed using: self.accuracy
'''
correct_prediction = tf.equal(tf.argmax(self.y_conv, 1), tf.argmax(self.y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
return accuracy
'''
Properties that we'll use for debugging
'''
@lazy_property
def grads_and_vars(self):
grads_and_vars = self.optimizer.compute_gradients(self.cross_entropy, tf.trainable_variables())
return grads_and_vars
def train(self, batch_size, num_epochs, print_freq, debug_out='debug.txt'):
'''
Train the Network on the data that will have been loaded when the NN is initialized
        Trained on: self.X_train, and an OH encoding of self.y_train
Trains with batch_size batches for num_epochs epochs
Debugging info is written to debug.txt (can add params to have more places to write out
to)
'''
# Starting an interactive session and initializing the parameters
#sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
# replace it with the one-hot encoded one --- should I replace?
y_trainOH = sess.run(self.y_train_OHEnc)[:, 0, :]
y_valOH = sess.run(self.y_val_OHEnc)[:, 0, :]
# lists to record accuracy at several points during training
train_acc_list = []
val_acc_list = []
train_acc_on_batch_list = []
# lists to record the error at several points during training
train_err_list = []
val_err_list = []
train_err_on_batch_list = []
# track which epochs you record data during
epoch_numbers = []
# record the start time
t_start = time.time()
for epoch in range(num_epochs):
epochStart = time.time()
# train by systematically pulling batches of batch_size from
# the training set and taking a training step on each batch
for i in range(0, self.num_training_vec, batch_size):
batch_end_point = min(i + batch_size, self.num_training_vec)
train_batch_data = self.X_train[i : batch_end_point]
train_batch_label = y_trainOH[i : batch_end_point]
self.train_step.run(feed_dict={self.x: train_batch_data, self.y_: train_batch_label})
epochEnd = time.time()
# print and record data now that we've trained on our full training set
if (epoch + 1) % print_freq == 0:
# timing for the measurements of cost and accuracy
evaluationStart = time.time()
                # compute training and validation cost and accuracy on the
                # full data sets, then print them and add them to the lists
                # we start with accuracy:
                train_acc = self.evalByBatch(self.accuracy, self.X_train, y_trainOH, 5000)
                train_acc_list.append(train_acc)
                val_acc = self.evalByBatch(self.accuracy, self.X_val, y_valOH, 5000)
                val_acc_list.append(val_acc)
                # Now we compute the error on each set:
                train_err = self.evalByBatch(self.cross_entropy, self.X_train, y_trainOH, 5000)
                train_err_list.append(train_err)
                val_err = self.evalByBatch(self.cross_entropy, self.X_val, y_valOH, 5000)
                val_err_list.append(val_err)
# keep track of which epochs we have data for
epoch_numbers += [epoch]
# this marks the end of our evaluation
evaluationEnd = time.time()
# print a summary of our NN at this epoch
print("epoch: %d, time (train, evaluation): (%g, %g), t acc, v acc, t cost, v cost: %.5f, %.5f, %.5f, %.5f"%(epoch+1, epochEnd - epochStart, evaluationEnd - evaluationStart, train_acc, val_acc, train_err, val_err))
# debugging print outs
if self.debug:
# print out step / current value ratio for each parameter in our network
# based on training data from the most recent batch
# to the file with name debug_out
self.debug_WriteGradAndVar(train_batch_data, train_batch_label, epoch, debug_out)
# record the total time spent training the neural network
t_end = time.time()
print('--Time elapsed for training for %g epochs: %g'%(num_epochs, t_end - t_start))
# return the lists of logged data
return [train_acc_list, val_acc_list, train_err_list, val_err_list, epoch_numbers]
def evalByBatch(self, toEval, x, y_, batchSize):
weightedAvg = 0.0
for i in range(0, len(x), batchSize):
batch_end_point = min(i + batchSize, len(x))
batch_data = x[i : batch_end_point]
batch_label = y_[i : batch_end_point]
curAmount = toEval.eval(feed_dict={self.x: batch_data, self.y_: batch_label})
# weight by the length of the batch and keep adding on
weightedAvg = weightedAvg + curAmount * float(batch_end_point - i) / len(x)
return weightedAvg
def debug_WriteGradAndVar(self, xDebug, yDebug, epoch, debug_out):
'''
Helper function that prints the ratio of the training step that would be taken
on input data and labels xDebug and yDebug to the magnitude of each parameter
in the network. This gives us a sense of how much each parameter is changing.
Inputs:
xDebug: input data to calculate the gradient from
yDebug: labels for the input data
epoch: the number of the epoch (to print out to the file)
debug_out: the file to write to - if it doesn't exist it will be created
'''
file_object = open(debug_out, 'a+')
# record which epoch this is
file_object.write("Epoch: %d\n"%(epoch))
# find the current learning rate - this will be used with the gradient to find the step size
curLearningRate = self.optimizer._lr
# print each gradient and the variables they are associated with
# the gradients are stored in tuples, where the first element is a tensor
# that computes the gradient, and the second is the parameter that gradient
# is associated with
for gv in self.grads_and_vars:
curGrads = gv[0].eval(feed_dict={self.x: xDebug, self.y_: yDebug})
            curSteps = curGrads * curLearningRate # scale down the gradient by the learning rate
curVars = gv[1].eval()
# How much, compared to the magnitude of the weight, are we stepping
stepToVarRatio = np.absolute(np.divide(curSteps, curVars))
# print the name of the variable, then all the step ratios (step amount / current value)
# these values will have been averaged across the training examples
curName = gv[1].name
file_object.write("Variable: " + curName + "\n")
for index, step in np.ndenumerate(stepToVarRatio):
file_object.write(str(index) + ": " + str(step) + "\n")
# print summary statistics for this layer
maxVal = np.amax(stepToVarRatio)
thirdQuartile = np.percentile(stepToVarRatio, 75)
mean = np.mean(stepToVarRatio)
median = np.median(stepToVarRatio)
firstQuartile = np.percentile(stepToVarRatio, 25)
minVal = np.amin(stepToVarRatio)
file_object.write("Statistics: (%g, %g, %g, %g, %g, %g)\n"%(minVal, firstQuartile, median, mean, thirdQuartile, maxVal))
file_object.write("---------------------------------------\n")
# close the file
file_object.close()
def makeTrainingPlots(epochs, paramValues, trainingMetricLists, validationMetricLists, paramName, metricName, titles, filenames):
'''
Plots of the given training and validation metrics versus epoch number. One plot per list
in trainingMetricLists and validationMetricLists. Assume there will be the same number of sublists
in both those parameters. Titles will hold a list of strings that will be used for the titles
of the graphs. The last title will be for the plot with all the validation curves. Filenames is a list of filenames to save your plots to
Input:
epochs: a list of the epochs on which data was taken - assume all of them took
data at the same epoch numbers
paramValues: the values of the param that we were varying (to label the curves in our validation plot)
trainingMetricLists: a list of lists, where each list represents some metric on the progress of training throughout training
validationMetricLists: a list of lists, where each list represents some metric on the progress of training throughout training
paramName: name of the parameter you're varying (e.g. learningRate or kernel height)
metricName: the name of the metric (e.g. accuracy, or cross-entropy error), to be used on the y-axis
titles: titles for the graph (will include info on the params used).
*The last title will be for the validation plot
filename: the filenames to write the graphs to (will include info on the params used)
* the last filename will be for the validation plot
Output:
Write a png file for each list in trainingMetricLists/validationMetricLists with the desired plot
'''
# figure with all the validation curves
validationFig = plt.figure(figsize=(7, 4))
validationPlot = validationFig.add_subplot(111)
# go through each setup and make a plot for each
for i in range(len(trainingMetricLists)):
# pull out the list we're concerned with
trainingMetric = trainingMetricLists[i]
validationMetric = validationMetricLists[i]
# make the figure, add plots, axis lables, a title, and legend
fig = plt.figure(figsize=(7, 4))
myPlot = fig.add_subplot(111)
myPlot.plot(epochs, trainingMetric, '.', label="Training")
myPlot.plot(epochs, validationMetric, '.', label="Validation")
myPlot.set_xlabel("Epoch Number")
myPlot.set_ylabel(metricName)
myPlot.set_title(titles[i])
myPlot.legend(loc="best", frameon=False)
# Write the figure
fig.savefig(filenames[i])
# update the figure with all the validation curves
validationPlot.plot(epochs, validationMetric, '.', label=(paramName + " = " + str(paramValues[i])))
# finish labeling + write the validation plot
validationPlot.set_xlabel("Epoch Number")
validationPlot.set_ylabel(metricName)
validationPlot.set_title(titles[-1])
validationPlot.legend(loc="best", frameon=False)
validationFig.savefig(filenames[-1])
def makeBestResultPlot(paramValues, trainingMetricLists, validationMetricLists, bestFunction, paramName, metricName, title, filename):
'''
Plot the "best" value of the training and validation metric against the param that led to it
Best is assumed to be the largest value of the metric
Input:
trainingMetricLists: a list of lists, where each list represents some metric on the progress of training throughout training
validationMetricLists: a list of lists, where each list represents some metric on the progress of training throughout training
bestFunction: function that takes in a list (of values of the metric) and returns the "best" one. Often min or max will suffice
paramName: name of the parameter you varied in this experiment (e.g. height of kernel)
metricName: name of the metric you're using (e.g. cross-entropy error)
title: the title of the graph (will include info on the params used)
filename: the filename to write the graph to (will include info on the params used)
Output:
Write a png file with the desired plot
Is there a way to call the other one to do this? if didn't assume epoch number then yes - oh well
'''
bestTrainingMetrics = [bestFunction(curList) for curList in trainingMetricLists]
bestValidationMetrics = [bestFunction(curList) for curList in validationMetricLists]
# make the figure, add plots, axis lables, a title, and legend
fig = plt.figure(figsize=(7, 4))
myPlot = fig.add_subplot(111)
myPlot.plot(paramValues, bestTrainingMetrics, '.', label="Training")
myPlot.plot(paramValues, bestValidationMetrics, '.', label="Validation")
myPlot.set_xlabel(paramName)
myPlot.set_ylabel(metricName)
myPlot.set_title(title)
myPlot.legend(loc="best", frameon=False)
# Write the figure
fig.savefig(filename)
def makeEndResultPlot(paramValues, trainingMetricLists, validationMetricLists, paramName, metricName, title, filename):
'''
Plot the final value of the training and validation metric against the param that led to it
Input:
trainingMetricLists: a list of lists, where each list represents some metric on the progress of training throughout training
validationMetricLists: a list of lists, where each list represents some metric on the progress of training throughout training
        paramName: name of the parameter you varied in this experiment (e.g. height of kernel)
        metricName: name of the metric you're using (e.g. cross-entropy error)
title: the title of the graph (will include info on the params used)
filename: the filename to write the graph to (will include info on the params used)
Output:
Write a png file with the desired plot
Is there a way to call the other one to do this? if didn't assume epoch number then yes - oh well
'''
finalTrainingMetrics = [curList[-1] for curList in trainingMetricLists]
finalValidationMetrics = [curList[-1] for curList in validationMetricLists]
# make the figure, add plots, axis lables, a title, and legend
fig = plt.figure(figsize=(7, 4))
myPlot = fig.add_subplot(111)
myPlot.plot(paramValues, finalTrainingMetrics, label="Training")
myPlot.plot(paramValues, finalValidationMetrics, label="Validation")
myPlot.set_xlabel(paramName)
myPlot.set_ylabel(metricName)
myPlot.set_title(title)
myPlot.legend(loc="best", frameon=False)
# Write the figure
fig.savefig(filename)
'''
Our main, with 121x1 convolutional layer.
'''
# read in command line parameters
try:
filterColsString = sys.argv[1]
# map it from a string into a list of ints
filterColsIn = map(int, filterColsString.strip('[]').split(','))
# read in k1 as well
k1sString = sys.argv[2]
k1sIn = map(int, k1sString.strip('[]').split(','))
# read in the learning rates
learningRatesString = sys.argv[3]
learningRatesIn = map(float, learningRatesString.strip('[]').split(','))
# read in the number of epochs
numEpochs = int(sys.argv[4])
finalPlotName = sys.argv[5]
except Exception, e:
print('-- {}'.format(e))
# filepath to the data you want to load
filepath = '/pylon2/ci560sp/cstrong/exp3/exp3_taylorswift_d15_1s_C1C8.mat'
# define the configurations we're going to be looking at
# in this exp: just change the number of rows in a vertical kernel
filterCols = filterColsIn
filterRows = [1] * len(filterColsIn)
k1s = k1sIn
learningRates = learningRatesIn
# set training parameters
batchSize = 1000
print_freq = 1
# make lists to store data
train_acc_lists = []
val_acc_lists = []
train_err_lists = []
val_err_lists = []
epoch_number_lists = []
# load data
[X_train, y_train, X_val, y_val] = loadData(filepath)
# loop through the setups and make a model each time
for i in range(len(filterRows)):
# create the model - this will create the TF graph as well as load the data
m = Model(169, X_train, y_train, X_val, y_val, filterRows[i], filterCols[i], k1s[i], learningRates[i], False)
# actually train the model (on the data it already loaded)
[train_acc_list, val_acc_list, train_err_list, val_err_list, epoch_numbers] = m.train(1000, numEpochs, print_freq)
# store the new data
train_acc_lists.append(train_acc_list)
val_acc_lists.append(val_acc_list)
train_err_lists.append(train_err_list)
val_err_lists.append(val_err_list)
epoch_number_lists.append(epoch_numbers)
del m # clear out the model to avoid huge buildup of memory
# print what you have so far in case it crashes
print("So far after %g models we have:"%(i+1))
print("Filter Rows: %s"%(filterRows))
print("Filter Cols: %s"%(filterCols))
print("K1s: %s"%(k1s))
print("Learning Rates: %s"%(learningRates))
print("Train acc list: %s"%(str(train_acc_lists)))
print("Val acc list: %s"%(str(val_acc_lists)))
print("Train err list: %s"%(str(train_err_lists)))
print("Val err list: %s"%(str(val_err_lists)))
print("Epoch number lists: %s"%(str(epoch_number_lists)))
# printing
print("Filter Rows: %s"%(filterRows))
print("Filter Cols: %s"%(filterCols))
print("K1s: %s"%(k1s))
print("Learning Rates: %s"%(learningRates))
print("Train acc list: %s"%(str(train_acc_lists)))
print("Val acc list: %s"%(str(val_acc_lists)))
print("Train err list: %s"%(str(train_err_lists)))
print("Val err list: %s"%(str(val_err_lists)))
print("Epoch number lists: %s"%(str(epoch_number_lists)))
# plotting
trainingPlotTitles = ['Single Layer CNN with %gx%g kernels, k1=%g and LR=%g'%(filterRows[i], filterCols[i], k1s[i], learningRates[i]) for i in range(len(filterRows))]
trainingPlotTitles.append('Exp 3c_4, Validation Cross-Entropy Cost vs. Epoch')
trainingPlotFiles = ['exp3c_4_training_%gx%g_k1=%g_LR=%f_%gEpochs.png'%(filterRows[i], filterCols[i], k1s[i], learningRates[i], numEpochs) for i in range(len(filterRows))]
trainingPlotFiles.append('exp3c_4_validationCurves_%gEpochs'%(numEpochs))
makeTrainingPlots(epoch_number_lists[0], k1s, train_err_lists, val_err_lists, "k1", "Cross Entropy Cost", trainingPlotTitles, trainingPlotFiles)
makeBestResultPlot(k1s, train_err_lists, val_err_lists, min, "k1", "Cross Entropy Cost", 'Cost vs. k1', finalPlotName)
| mit |
tomlof/scikit-learn | examples/exercises/plot_iris_exercise.py | 31 | 1622 | """
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(np.float)
X_train = X[:int(.9 * n_sample)]
y_train = y[:int(.9 * n_sample)]
X_test = X[int(.9 * n_sample):]
y_test = y[int(.9 * n_sample):]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'],
linestyles=['--', '-', '--'], levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
| bsd-3-clause |
muLAn-project/muLAn | muLAn/models/grid_dmcmc.py | 1 | 59319 | # -*-coding:Utf-8 -*
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# External libraries
# ----------------------------------------------------------------------
import sys
import os
# ----------------------------------------------------------------------
# Packages
# ----------------------------------------------------------------------
import copy
import emcee
import pickle
import glob
import shutil
import datetime
import importlib
import subprocess
import numpy as np
from scipy import stats
from scipy import interpolate
from sklearn import linear_model
import muLAn.models as mulanmodels
import muLAn.packages.algebra as algebra
# ----------------------------------------------------------------------
# CLASS
# ----------------------------------------------------------------------
class printoption:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
reset = '\033[0m'
bright = '\033[1m'
dim = '\033[2m'
underscore = '\033[4m'
blink = '\033[5m'
reverse = '\033[7m'
hidden = '\033[8m'
level0 = "\033[1m\033[31m"
level1 = "\033[1m"
good = "\033[32m"
# ----------------------------------------------------------------------
# Functions
# ----------------------------------------------------------------------
def help():
text = "grid_dmcmc - Differential Markov Chains Monte Carlo."
return text
# ----------------------------------------------------------------------
def bash_command(text):
proc = subprocess.Popen(text, shell=True, executable="/bin/bash")
proc.wait()
# ----------------------------------------------------------------------
# def update_progress(job_title, progress):
def update_progress(job_title, a, b):
length = 20
progress = float(a)/float(b)
block = int(round(length*progress))
msg = "\r {0}: [{1}] {2:3.0f}% --> {3:d} / {4:d}".format(job_title, "*"*block + "-"*(length-block), round(progress*100, 2), a, b)
if progress >= 1: msg = msg + " \033[1m\033[32mDONE\033[0m\r\n"
sys.stdout.write(msg)
sys.stdout.flush()
# ----------------------------------------------------------------------
def update_progress_grid(a, b, c, d):
length = 10
progress = float(a)/float(b)
progress_grid = float(c) / float(d)
block = int(round(length*progress))
block_grid = int(round(length * progress_grid))
msg = "\r Grid\033[34m+MCMC\033[0m: {2:4d} / {3:4d} <-- {4:3.0f}% [{0}]\033[34m[{1}] {5:3.0f}% --> {6:d} / {7:d}\033[0m\r".format("*"*block_grid + "-"*(length-block_grid),\
"*" * block + "-" * (length - block), c, d, round(progress_grid * 100, 2), round(progress*100, 2), a, b)
sys.stdout.write(msg)
sys.stdout.flush()
# ----------------------------------------------------------------------
def communicate(cfg, verbose, text, opts=False, prefix=False, newline=False, tab=False):
if cfg.getint('Modelling', 'Verbose') >= verbose:
if prefix:
text = "[muLAn] " + text
if opts!=False:
text2=''
for a in opts:
text2 = text2 + a
text = text2 + text + printoption.reset
if tab:
text = " " + text
if newline:
text = "\n" + text
print(text)
else:
if tab:
text = " " + text
if newline:
text = "\n" + text
print(text)
# ----------------------------------------------------------------------
def zipdir(path, ziph):
# ziph is zipfile handle
for root, dirs, files in os.walk(path):
for file in files:
ziph.write(os.path.join(root, file))
# ----------------------------------------------------------------------
def combin(p):
    """Build the full parameter grid from the per-parameter value lists.
    p is a list of 1-D arrays (one per gridded parameter); the result has one
    row per parameter and one column per node of the Cartesian product, i.e.
    shape (len(p), prod(len(p[m]))).
    """
    Np = len(p)
    Ng = 1
    for m in range(0, Np):
        Ng *= len(p[m])
    gridlist = np.zeros((Np, Ng), dtype='f8')
    Nr = Ng
    for m in range(0, Np):
        Nr = Nr / len(p[m])
        q = 0
        for k in range(0, Ng / Nr):
            for n in range(0, Nr):
                # Hold each value of parameter m for Nr consecutive nodes,
                # cycling over its len(p[m]) values as k increases.
                gridlist[m][q] = p[m][k % len(p[m])]
                q += 1
    return gridlist
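# Minimal sketch of what combin() returns, using two hypothetical parameter
# axes (the values below are illustrative only):
def _combin_example():
    grid = combin([np.array([0.0, 1.0]), np.array([0.1, 0.2, 0.3])])
    # grid has shape (2, 6); grid.T lists every node of the grid exactly once:
    # [0.0, 0.1], [0.0, 0.2], [0.0, 0.3], [1.0, 0.1], [1.0, 0.2], [1.0, 0.3]
    return grid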
# ----------------------------------------------------------------------
def binrot(alpha, tau, beta, s, q):
"""Source position over the time. The general conventions used are
the same as in appendix A in [1].
Arguments:
alpha -- the angle between the lens symmetry axis and the source
trajectory;
tau -- the time component of the source position;
beta -- the component of the source position orthogonal to tau;
s -- the primary-secondary distance;
q -- the secondary-primary mass ratio.
Returns:
x -- numpy array including the x component of the source relative to
the lens center of mass (CM);
y -- numpy array including the y component of the source relative to
the CM.
References:
[1] Skowron et al. 2011, 738, 87.
"""
tau_chap = np.array([np.cos(alpha), np.sin(alpha)])
beta_chap = np.array([-np.sin(alpha), np.cos(alpha)])
lenssource = np.array([tau[i] * tau_chap + beta[i] * beta_chap for i in range(len(tau))])
gl1 = s * q/(1+q) * np.array([1, 0])
lenssource = lenssource - gl1
return lenssource.T[0], lenssource.T[1]
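# Minimal sketch of a binrot() call with hypothetical numbers: a source crossing
# the binary axis at 30 degrees with impact parameter 0.1, for a lens with
# separation s=1.2 and mass ratio q=1e-3 (all values are illustrative only):
def _binrot_example():
    tau = np.linspace(-1.0, 1.0, 5)
    beta = np.full(tau.shape, 0.1)
    x, y = binrot(np.radians(30.0), tau, beta, 1.2, 1e-3)
    # x, y are the source coordinates in the centre-of-mass frame of the lens
    return x, y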
# ----------------------------------------------------------------------
def test_blending(mb_lim, g_lim, fs, fb, time_serie, cond2):
    """Clamp the source flux fs and blend flux fb so that the blend ratio
    g = fb/fs and the baseline magnitude mb = 18 - 2.5*log10(fs+fb) stay
    inside the allowed ranges g_lim and mb_lim."""
    g_mod = fb/fs
    mb_mod = 18.0 - 2.5*np.log10(fs+fb)
# Blending ok
cond = (g_lim[0] < g_mod) & (g_mod < g_lim[1]) & (mb_lim[0] < mb_mod) & (mb_mod < mb_lim[1])
if cond:
fs_new = fs
fb_new = fb
# Corners C1
cond = (g_lim[1] < g_mod) & (mb_lim[1] < mb_mod)
if cond:
fs_new = (10**((18.0-mb_lim[1])/2.5))/(1+g_lim[1])
fb_new = g_lim[1] * fs_new
# Corners C2
cond = (g_lim[1] < g_mod) & (mb_mod < mb_lim[0])
if cond:
        fs_new = (10**((18.0-mb_lim[0])/2.5))/(1+g_lim[1])
fb_new = g_lim[1] * fs_new
# Corners C3
cond = (g_mod < g_lim[0]) & (mb_mod < mb_lim[0])
if cond:
        fs_new = (10**((18.0-mb_lim[0])/2.5))/(1+g_lim[0])
fb_new = g_lim[0] * fs_new
# Corners C4
cond = (g_mod < g_lim[0]) & (mb_lim[1] < mb_mod)
if cond:
fs_new = (10**((18.0-mb_lim[1])/2.5))/(1+g_lim[0])
fb_new = g_lim[0] * fs_new
# Boundary B1
cond = (g_lim[0] < g_mod) & (g_mod < g_lim[1]) & (mb_lim[1] < mb_mod)
if cond:
        x = np.atleast_2d(time_serie['amp'][cond2] - 1.0).T
        y = np.atleast_2d(time_serie['flux'][cond2] - 10 ** ((18.0 - mb_lim[1]) / 2.5)).T
regr = linear_model.LinearRegression(fit_intercept=False)
regr.fit(x, y)
fs_new = regr.coef_[0][0]
fb_new = 10 ** ((18.0 - mb_lim[1]) / 2.5) - fs_new
# Boundary B2
cond = (g_lim[1] < g_mod) & (mb_lim[0] < mb_mod) & (mb_mod < mb_lim[1])
if cond:
x = np.atleast_2d(time_serie['amp'][cond2] + g_lim[1]).T
y = np.atleast_2d(time_serie['flux'][cond2]).T
regr = linear_model.LinearRegression(fit_intercept=False)
regr.fit(x, y)
fs_new = regr.coef_[0][0]
fb_new = g_lim[1]*fs_new
# Boundary B3
cond = (g_lim[0] < g_mod) & (g_mod < g_lim[1]) & (mb_mod < mb_lim[0])
if cond:
        x = np.atleast_2d(time_serie['amp'][cond2] - 1.0).T
        y = np.atleast_2d(time_serie['flux'][cond2] - 10 ** ((18.0 - mb_lim[0]) / 2.5)).T
regr = linear_model.LinearRegression(fit_intercept=False)
regr.fit(x, y)
fs_new = regr.coef_[0][0]
fb_new = 10 ** ((18.0 - mb_lim[0]) / 2.5) - fs_new
# Boundary B4
cond = (g_mod < g_lim[0]) & (mb_lim[0] < mb_mod) & (mb_mod < mb_lim[1])
if cond:
x = np.atleast_2d(time_serie['amp'][cond2] + g_lim[0]).T
y = np.atleast_2d(time_serie['flux'][cond2]).T
regr = linear_model.LinearRegression(fit_intercept=False)
regr.fit(x, y)
fs_new = regr.coef_[0][0]
fb_new = g_lim[0]*fs_new
return fs_new, fb_new
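# Minimal sketch of a test_blending() call with hypothetical limits and fluxes
# (illustrative numbers only): here (fs, fb) already satisfies both constraints
# and is returned unchanged; outside the allowed box the fluxes are pulled back
# to a corner or refit on the corresponding boundary.
def _test_blending_example():
    fake_serie = {'amp': np.array([1.5, 2.0, 3.0]),
                  'flux': np.array([2.0, 2.6, 3.8])}
    cond = np.ones(3, dtype=bool)
    return test_blending([17.25, 17.36], [0.0, 10.0], 1.6, 0.35, fake_serie, cond)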
# ----------------------------------------------------------------------
# def sort_on_runtime(pos):
# print(pos)
# p = np.atleast_2d(pos)
# idx = np.argsort(p[:, 3])#[::-1]
# #print(idx)
# return p[idx], idx
# ----------------------------------------------------------------------
def lnprior(param_model):
p = 0
if param_model['t0'] < 0:
p = 1e12
if param_model['rho'] < 0:
p = 1e12
if param_model['rho'] > 1.0:
p = 1e12
if param_model['tE'] < 1e-10:
p = 1e12
if param_model['q'] < 1e-9:
p = 1e12
# if param_model['q'] > 1.0:
# p = 1e12
if param_model['s'] < 1e-10:
p = 1e12
if param_model['s'] > 10:
p = 1e12
return p
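# Minimal sketch of lnprior(): it returns 0 for a parameter set inside the
# allowed ranges and a large penalty (1e12) otherwise (hypothetical values):
def _lnprior_example():
    ok = {'t0': 5000.0, 'rho': 1e-3, 'tE': 20.0, 'q': 1e-3, 's': 1.0}
    bad = dict(ok, rho=2.0)  # rho > 1 is rejected
    return lnprior(ok), lnprior(bad)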
# ----------------------------------------------------------------------
def lnprob(theta, time_serie, model_params, fitted_param, nuisance, models_names,
interpol_method, tb, cfgsetup):
# print(theta[2])
#import modulesloading as load_modules
#models, y = load_modules.main()
#models = {models_names[i] : models[i] for i in range(len(models_names))}
models = dict()
for i in range(len(models_names)):
text = 'muLAn.models.{:s}'.format(models_names[i])
importlib.import_module(text)
models.update({models_names[i]: getattr(mulanmodels, models_names[i])})
# print(res)
# -----------
# print(models)
# print(models_names)
# sys.exit()
# print(models)
# print("Hello, c'est moi")
flag_fix_gamma = 1
key_list = np.array([])
for key, value in fitted_param.iteritems():
key_list = np.append(key_list, key)
param_model = nuisance
id=0
cond = (key_list=='t0')
if cond.sum()==1:
param_model.update({'t0' : theta[id]})
id=id+1
cond = (key_list=='u0')
if cond.sum()==1:
param_model.update({'u0' : theta[id]})
id=id+1
cond = (key_list=='tE')
if cond.sum()==1:
param_model.update({'tE' : theta[id]})
id=id+1
cond = (key_list=='rho')
if cond.sum()==1:
param_model.update({'rho' : theta[id]})
id=id+1
cond = (key_list=='gamma')
if cond.sum()==1:
param_model.update({'gamma' : theta[id]})
flag_fix_gamma = 0
id=id+1
cond = (key_list=='piEE')
if cond.sum()==1:
param_model.update({'piEE' : theta[id]})
id=id+1
cond = (key_list=='piEN')
if cond.sum()==1:
param_model.update({'piEN' : theta[id]})
id=id+1
cond = (key_list=='s')
if cond.sum()==1:
param_model.update({'s' : theta[id]})
id=id+1
cond = (key_list=='q')
if cond.sum()==1:
if theta[id] < 1.0:
param_model.update({'q' : theta[id]})
else:
try:
param_model.update({'q' : 1.0 / theta[id]})
except:
param_model.update({'q' : theta[id]})
id=id+1
cond = (key_list=='alpha')
if cond.sum()==1:
param_model.update({'alpha' : theta[id]})
id=id+1
cond = (key_list == 'dalpha')
if cond.sum()==1:
param_model.update({'dalpha' : theta[id]})
id=id+1
cond = (key_list == 'ds')
if cond.sum()==1:
param_model.update({'ds' : theta[id]})
# id=id+1
# Evaluate priors
chi2 = 0
lnprior_curr = lnprior(param_model)
if lnprior_curr < 1e11:
# print("Amplification, tu veux ?")
# Calculation of the amplification
observatories = np.unique(time_serie['obs'])
models_lib = np.unique(time_serie['model'])
for j in range(len(observatories)):
cond2 = (time_serie['obs']==observatories[j])
#print(observatories[j])
if flag_fix_gamma:
param_model.update({'gamma': time_serie['gamma'][cond2][0]})
for i in range(models_lib.shape[0]):
#print(models_lib[i])
cond = (time_serie['model'] == models_lib[i]) & (time_serie['obs']==observatories[j])\
& (time_serie['interpol'] == '0')
if cond.sum() > 0:
time_serie_export = time_serie['dates'][cond]
DsN_export = time_serie['DsN'][cond]
DsE_export = time_serie['DsE'][cond]
Ds_export = dict({'N':DsN_export, 'E':DsE_export})
try:
kwargs_method = dict(cfgsetup.items(models_lib[i]))
except:
kwargs_method = dict()
amp = models[models_lib[i]].magnifcalc(time_serie_export, param_model, Ds=Ds_export, tb=tb, **kwargs_method)
time_serie['amp'][cond] = amp
del amp
# Interpolation method
# -------------------------------------------------------------------------
key_list = [key for key in interpol_method]
if len(key_list) > 0:
for i in range(len(key_list)):
time_serie_export = interpol_method[key_list[i]][0]
DsN_export = interpol_method[key_list[i]][1]
DsE_export = interpol_method[key_list[i]][2]
Ds_export = dict({'N':DsN_export, 'E':DsE_export})
name = key_list[i].split('#')[1]
try:
kwargs_method = dict(cfgsetup.items(name))
except:
kwargs_method = dict()
amp = models[name].magnifcalc(time_serie_export, param_model, Ds=Ds_export, tb=tb, **kwargs_method)
# print(amp)
interpol_method[key_list[i]][3] = amp
interpol_func = interpolate.interp1d(time_serie_export, amp, kind='linear')
# interpol_func.update({key_list[i]: interpolate.interp1d(time_serie_export, amp)})
cond = (time_serie['interpol'] == key_list[i])
if cond.sum() > 0:
amp = interpol_func(time_serie['dates'][cond])
time_serie['amp'][cond] = amp
# Source and blending fluxes.
# -------------------------------------------------------------------------
for j in range(len(observatories)):
cond2 = (time_serie['obs']==observatories[j])
#print(observatories[j])
# Calculation of fs and fb
# fs, fb = algebra.fsfb(time_serie, cond2, blending=True)
fs, fb = algebra.fsfbwsig(time_serie, cond2, blending=True)
# Relevance of blending for OGLE
# if (observatories[j]=="ogle-i"):
# mb_lim = [17.25, 17.36]
# g_lim = [0.0, 10.0]
# fs, fb = test_blending(mb_lim, g_lim, fs, fb, time_serie, cond2)
time_serie['fs'][cond2] = fs
time_serie['fb'][cond2] = fb
if (np.abs(fs) == np.inf) | (np.abs(fb) == np.inf):
lnprior_curr = - np.inf
# print("Amplification, tu as...")
# Calculation of chi2
# print(param_model, time_serie['amp'])
if lnprior_curr < 1e11:
time_serie['flux_model'] = time_serie['amp']*time_serie['fs'] + time_serie['fb']
time_serie['chi2pp'] = np.power((time_serie['flux']-time_serie['flux_model'])/time_serie['err_flux'], 2)
chi2 = np.sum(time_serie['chi2pp'])
result = - chi2/2.0 - lnprior_curr
else:
time_serie['flux_model'] = np.ones(len(time_serie['amp']))
time_serie['chi2pp'] = np.ones(len(time_serie['amp']))*1e12
result = -1e12
else:
time_serie['flux_model'] = np.ones(len(time_serie['amp']))
time_serie['chi2pp'] = np.ones(len(time_serie['amp']))*1e12
result = -1e12
if (chi2 < 1e-3) | (chi2 == np.inf):
time_serie['flux_model'] = np.ones(len(time_serie['amp']))
time_serie['chi2pp'] = np.ones(len(time_serie['amp']))*1e12
result = -1e12
return result
# ----------------------------------------------------------------------
def ini_chains_gene(fitted_param, nwalkers, params):
result = []
key_list = np.array([key for key in fitted_param])
key_list_order = np.array(['t0', 'u0', 'tE', 'rho', 'gamma', 'piEE', 'piEN', 's', 'q', 'alpha', 'dalpha', 'ds'])
intersection = np.intersect1d(key_list_order, key_list)
key_list = [key for key in key_list_order if (len(np.where(intersection==key)[0])>0)]
l = 0
while(l<nwalkers):
table = np.array([])
for key in key_list:
if l==0: table = np.append(table, fitted_param[key])
else:
a = fitted_param[key] - abs(float(params[key][1]))
b = fitted_param[key] + abs(float(params[key][2]))
x = (np.max([a, b]) - np.min([a, b])) * np.random.random_sample() + np.min([a, b])
table = np.append(table, x)
result.append(table)
l = l + 1
return result
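# Minimal sketch of ini_chains_gene() with a hypothetical setup: three walkers
# started around (t0, u0); each params entry mimics the configuration columns
# (mode, minus width, plus width, start value) used elsewhere in this module.
def _ini_chains_gene_example():
    fitted = {'t0': 5000.0, 'u0': 0.1}
    cfg_like = {'t0': ['fit', '1.0', '1.0', '5000.0'],
                'u0': ['fit', '0.05', '0.05', '0.1']}
    # the first walker starts exactly at the fitted values, the others are
    # drawn uniformly inside the +/- widths
    return ini_chains_gene(fitted, 3, cfg_like)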
# ----------------------------------------------------------------------
# Differential MCMC
# ----------------------------------------------------------------------
def search(cfgsetup=False, models=False, model_param=False, time_serie=False,\
model2load=False, interpol_method=False):
# ==================================================================
# Preparing MCMC
# ==================================================================
# Emergency Stop initialization
if os.path.exists(cfgsetup.get('FullPaths', 'Event') + '.emergencystop'):
os.remove(cfgsetup.get('FullPaths', 'Event') + '.emergencystop')
file = open(cfgsetup.get('FullPaths', 'Event') + '.emergencystop', 'w')
file.write('0')
file.close()
fn_lock = cfgsetup.get('FullPaths', 'Event') + '.lock'
if not os.path.exists(fn_lock): open(fn_lock, 'w').close()
# Parameter to be fitted / Nuisance parameters
params = {
't0' : np.array([a.strip() for a in cfgsetup.get('Modelling',
't0').split(',')]),\
'u0' : np.array([a.strip() for a in cfgsetup.get('Modelling',
'u0').split(',')]),\
'tE' : np.array([a.strip() for a in cfgsetup.get('Modelling',
'tE').split(',')]),\
'rho' : np.array([a.strip() for a in cfgsetup.get('Modelling',
'rho').split(',')]),\
'gamma' : np.array([a.strip() for a in cfgsetup.get('Modelling',
'gamma').split(',')]),\
'piEE' : np.array([a.strip() for a in cfgsetup.get('Modelling',
'piEE').split(',')]),\
'piEN' : np.array([a.strip() for a in cfgsetup.get('Modelling',
'piEN').split(',')]),\
's' : np.array([a.strip() for a in cfgsetup.get('Modelling',
's').split(',')]),\
'q' : np.array([a.strip() for a in cfgsetup.get('Modelling',
'q').split(',')]),\
'alpha' : np.array([a.strip() for a in cfgsetup.get('Modelling',
'alpha').split(',')]),\
'dalpha': np.array([a.strip() for a in cfgsetup.get('Modelling', 'dalpha').split(',')]),\
'ds': np.array([a.strip() for a in cfgsetup.get('Modelling', 'ds').split(',')])\
}
# Files
path = cfgsetup.get('FullPaths', 'Event')\
+ cfgsetup.get('RelativePaths', 'ModelsHistory')\
+ cfgsetup.get('Controls', 'Archive')\
+ '-ModelsSummary.txt'
if os.path.exists(path): os.remove(path)
sys.path.insert(0, cfgsetup.get('FullPaths', 'Code') + 'packages/')
if (cfgsetup.getboolean('FitSetupDMCMC', 'Resume')==False):
path = cfgsetup.get('FullPaths', 'Event') + cfgsetup.get('RelativePaths', 'Chains')
shutil.rmtree(path)
if not os.path.exists(path): os.makedirs(path)
for i in range(cfgsetup.getint('FitSetupDMCMC', 'Chains')):
filename4chains = path + cfgsetup.get('Controls', 'Archive')\
+ '-c{:04d}'.format(i) + '.txt'
file_chains = open(filename4chains, 'a')
format4chains = '# Exploration of chain n°{:d}.\n'.format(i)\
+ '#{:>9} '.format('ID')\
+ '{:>17} '.format('t0')\
+ '{:>17} '.format('u0')\
+ '{:>17} '.format('tE')\
+ '{:>17} '.format('rho')\
+ '{:>17} '.format('gamma')\
+ '{:>17} '.format('piEN')\
+ '{:>17} '.format('piEE')\
+ '{:>17} '.format('s')\
+ '{:>17} '.format('q')\
+ '{:>17} '.format('alpha')\
+ '{:>17} '.format('dalpha')\
+ '{:>17} '.format('ds')\
+ '{:>17} '.format('chi2')\
+ '{:>7} '.format('accrate')\
+ '{:>8} '.format('date')\
+ '{:>6} '.format('hour')\
+ '{:>17} '.format('chi2/dof')\
+ '\n'
file_chains.write(format4chains)
file_chains.close()
accrate_loaded = np.array([])
id_loaded = np.array([])
else:
path = cfgsetup.get('FullPaths', 'Event') + cfgsetup.get('RelativePaths', 'Chains')
if not os.path.exists(path):
text = "\n\033[1m\033[91mDirectory with chains is missing in 'Resume' mode. muLAn killed.\033[0m"
sys.exit(text)
else:
path = cfgsetup.get('FullPaths', 'Event') + cfgsetup.get('RelativePaths', 'Chains')
fnames_chains = glob.glob(path + cfgsetup.get('Controls', 'Archive') + "*-c*.txt")
fnames_chains_exclude = glob.glob(path + cfgsetup.get('Controls', 'Archive') + "*g*.txt")
temp = []
for a in fnames_chains:
if (a in fnames_chains_exclude) == False:
temp.append(a)
fnames_chains = copy.deepcopy(temp)
del temp, fnames_chains_exclude
nb_chains = len(fnames_chains)
if nb_chains != cfgsetup.getint("FitSetupDMCMC", "Chains"):
text = "\n\033[1m\033[91mThe number of chains does not fit in 'Resume' mode. muLAn killed.\033[0m"
sys.exit(text)
samples_file = dict({'chi2': [], 't0': [], 'u0': [], 'tE': [], 'rho': [], 'gamma': [], 'piEE': [], 'piEN': [], 's': [], 'q': [], 'alpha': [], 'dalpha': [], 'ds': [], 'chain': [], 'fullid': [], 'date_save': [], 'time_save': [], 'id': [], 'accrate': [], 'chi2/dof': []})
accrate_loaded = np.array([])
id_loaded = np.array([])
for i in range(nb_chains):
file = open(fnames_chains[i], 'r')
for line in file:
params_model = line
if params_model[0] == '#':
continue
samples_file['id'].append(int([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][0]))
samples_file['t0'].append(float([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][1]))
samples_file['u0'].append(float([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][2]))
samples_file['tE'].append(float([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][3]))
samples_file['rho'].append(float([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][4]))
samples_file['gamma'].append(float([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][5]))
samples_file['piEN'].append(float([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][6]))
samples_file['piEE'].append(float([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][7]))
samples_file['s'].append(float([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][8]))
samples_file['q'].append(float([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][9]))
samples_file['alpha'].append(float([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][10]))
samples_file['dalpha'].append(float([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][11]))
samples_file['ds'].append(float([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][12]))
samples_file['chi2'].append(float([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][13]))
samples_file['accrate'].append(float([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][14]))
samples_file['date_save'].append(int([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][15]))
samples_file['time_save'].append([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][16])
samples_file['chi2/dof'].append(float([a for a in (params_model.split('\n')[0].split(' ')) if (a != '')][17]))
samples_file['chain'].append(int(fnames_chains[i][-8:-4]))
samples_file['fullid'].append(-1)
file.close()
accrate_loaded = np.append(accrate_loaded, samples_file['accrate'][-1])
id_loaded = np.append(id_loaded, samples_file['id'][-1])
filename = cfgsetup.get('FullPaths', 'Event') + cfgsetup.get('RelativePaths', 'Chains')\
+ cfgsetup.get('Controls', 'Archive') + "-lastposition.p"
file = open(filename, "r")
pos_pickle = pickle.load(file)
file.close()
filename = cfgsetup.get('FullPaths', 'Event') + cfgsetup.get('RelativePaths', 'Chains')\
+ cfgsetup.get('Controls', 'Archive') + "-rgenerator.p"
file = open(filename, "r")
rgenerator_piclke = pickle.load(file)
file.close()
del samples_file
# Prepare the grids
grid_params = np.array([])
format = 'import numpy as np\nimport pickle\ntab = np.array(['
i = 0
if params['t0'][0]=="gri":
i = i + 1
grid_params = np.append(grid_params, ['t0'])
format = format + 'np.linspace({0:s}, {1:s}, {2:s}),'.format(params['t0'][1], params['t0'][2], params['t0'][3])
if params['u0'][0]=="gri":
i = i + 1
grid_params = np.append(grid_params, ['u0'])
format = format + 'np.linspace({0:s}, {1:s}, {2:s}),'.format(params['u0'][1], params['u0'][2], params['u0'][3])
if params['tE'][0]=="gri":
i = i + 1
grid_params = np.append(grid_params, ['tE'])
format = format + 'np.linspace({0:s}, {1:s}, {2:s}),'.format(params['tE'][1], params['tE'][2], params['tE'][3])
if params['rho'][0]=="gri":
i = i + 1
grid_params = np.append(grid_params, ['rho'])
format = format + 'np.linspace({0:s}, {1:s}, {2:s}),'.format(params['rho'][1], params['rho'][2], params['rho'][3])
if params['gamma'][0]=="gri":
i = i + 1
grid_params = np.append(grid_params, ['gamma'])
format = format + 'np.linspace({0:s}, {1:s}, {2:s}),'.format(params['gamma'][1], params['gamma'][2], params['gamma'][3])
if params['piEE'][0]=="gri":
i = i + 1
grid_params = np.append(grid_params, ['piEE'])
format = format + 'np.linspace({0:s}, {1:s}, {2:s}),'.format(params['piEE'][1], params['piEE'][2], params['piEE'][3])
if params['piEN'][0]=="gri":
i = i + 1
grid_params = np.append(grid_params, ['piEN'])
format = format + 'np.linspace({0:s}, {1:s}, {2:s}),'.format(params['piEN'][1], params['piEN'][2], params['piEN'][3])
if params['s'][0]=="gri":
i = i + 1
grid_params = np.append(grid_params, ['s'])
a = float(params['s'][1])
b = float(params['s'][2])
if (a > 0) & (b > 0):
format = format + 'np.logspace({0:.10e}, {1:.10e}, {2:s}),'.format(np.log10(a), np.log10(b), params['s'][3])
else:
sys.exit('Please enter a positive value for s.')
if params['q'][0]=="gri":
i = i + 1
grid_params = np.append(grid_params, ['q'])
a = float(params['q'][1])
b = float(params['q'][2])
if (a > 0) & (b > 0):
format = format + 'np.logspace({0:.10e}, {1:.10e}, {2:s}),'.format(np.log10(a), np.log10(b), params['q'][3])
else:
sys.exit('Please enter a positive value for q.')
if params['alpha'][0]=="gri":
i = i + 1
grid_params = np.append(grid_params, ['alpha'])
format = format + 'np.linspace({0:s}, {1:s}, {2:s}),'.format(params['alpha'][1], params['alpha'][2], params['alpha'][3])
if params['dalpha'][0]=="gri":
i = i + 1
grid_params = np.append(grid_params, ['dalpha'])
format = format + 'np.linspace({0:s}, {1:s}, {2:s}),'.format(params['dalpha'][1], params['dalpha'][2], params['dalpha'][3])
if params['ds'][0]=="gri":
i = i + 1
grid_params = np.append(grid_params, ['ds'])
format = format + 'np.linspace({0:s}, {1:s}, {2:s}),'.format(params['ds'][1], params['ds'][2], params['ds'][3])
format = format[:-1] + '])\n'
format = format + 'file_save = open("' + cfgsetup.get('FullPaths', 'Code')\
+ 'tmp.p", "w")\npickle.dump(tab, file_save)\nfile_save.close()\n'
filename = cfgsetup.get('FullPaths', 'Code') + 'temp_grid.py'
file_temp = open(filename, 'w')
file_temp.write(format)
file_temp.close()
flag_grid_yes = 1
if i>0:
execfile(filename)
filename = cfgsetup.get('FullPaths', 'Code') + 'tmp.p'
file = open(filename, 'r')
grid_values = pickle.load(file)
file.close()
os.remove(filename)
grid_values_combined = combin(grid_values)
nb_params_grid = len(grid_values_combined)
lengh_grid = len(grid_values_combined.T)
else:
nb_params_grid = 1
lengh_grid = 1
flag_grid_yes = 0
filename = cfgsetup.get('FullPaths', 'Code') + 'temp_grid.py'
os.remove(filename)
# Prepare the DMCMC
# print(lengh_grid)
t0_best = np.empty(cfgsetup.getint('FitSetupDMCMC', 'Chains'), 'f8')
u0_best = np.empty(cfgsetup.getint('FitSetupDMCMC', 'Chains'), 'f8')
tE_best = np.empty(cfgsetup.getint('FitSetupDMCMC', 'Chains'), 'f8')
rho_best = np.empty(cfgsetup.getint('FitSetupDMCMC', 'Chains'), 'f8')
gamma_best = np.empty(cfgsetup.getint('FitSetupDMCMC', 'Chains'), 'f8')
piEN_best = np.empty(cfgsetup.getint('FitSetupDMCMC', 'Chains'), 'f8')
piEE_best = np.empty(cfgsetup.getint('FitSetupDMCMC', 'Chains'), 'f8')
s_best = np.empty(cfgsetup.getint('FitSetupDMCMC', 'Chains'), 'f8')
q_best = np.empty(cfgsetup.getint('FitSetupDMCMC', 'Chains'), 'f8')
alpha_best = np.empty(cfgsetup.getint('FitSetupDMCMC', 'Chains'), 'f8')
dalpha_best = np.empty(cfgsetup.getint('FitSetupDMCMC', 'Chains'), 'f8')
ds_best = np.empty(cfgsetup.getint('FitSetupDMCMC', 'Chains'), 'f8')
lnprob_best = np.empty(cfgsetup.getint('FitSetupDMCMC', 'Chains'), 'f8')
accrate_best = np.empty(cfgsetup.getint('FitSetupDMCMC', 'Chains'), 'f8')
date_best = np.empty(cfgsetup.getint('FitSetupDMCMC', 'Chains'), 'S8')
hour_best = np.empty(cfgsetup.getint('FitSetupDMCMC', 'Chains'), 'S6')
for id_grid in range(lengh_grid):
# if flag_grid_yes:
# text = '\nGrid: {:d} / {:d}'.format(id_grid+1, lengh_grid)
# communicate(cfgsetup, 1, text)
# update_progress_grid("Grid progression", id_grid+1, lengh_grid)
if flag_grid_yes:
node = dict()
for id2_grid in range(nb_params_grid):
node.update({grid_params[id2_grid] : grid_values_combined.T[id_grid][id2_grid]})
path = cfgsetup.get('FullPaths', 'Event') + cfgsetup.get('RelativePaths', 'Chains')
for i in range(cfgsetup.getint('FitSetupDMCMC', 'Chains')):
filename4chains = path + cfgsetup.get('Controls', 'Archive')\
+ '-c{:04d}'.format(i) + '-g{:d}'.format(id_grid) + '.txt'
file_chains = open(filename4chains, 'a')
format4chains = '# Exploration of chain n°{:d}.\n'.format(i)\
+ '#{:>9} '.format('ID')\
+ '{:>17} '.format('t0')\
+ '{:>17} '.format('u0')\
+ '{:>17} '.format('tE')\
+ '{:>17} '.format('rho')\
+ '{:>17} '.format('gamma')\
+ '{:>17} '.format('piEN')\
+ '{:>17} '.format('piEE')\
+ '{:>17} '.format('s')\
+ '{:>17} '.format('q')\
+ '{:>17} '.format('alpha')\
+ '{:>17} '.format('dalpha')\
+ '{:>17} '.format('ds')\
+ '{:>17} '.format('chi2')\
+ '{:>7} '.format('accrate')\
+ '{:>8} '.format('date')\
+ '{:>6} '.format('hour')\
+ '{:>17} '.format('chi2/dof')\
+ '\n'
file_chains.write(format4chains)
file_chains.close()
# Prepare the DMCMC
nuisance = dict()
fitted_param = dict()
result = np.array([])
if params['t0'][0]!="fit":
if params['t0'][0]=="gri":
nuisance.update({'t0': node['t0']})
else:
nuisance.update({'t0': params['t0'][3].astype(np.float64)})
else:
fitted_param.update({'t0': params['t0'][3].astype(np.float64)})
result = np.append(result, fitted_param['t0'])
if params['u0'][0]!="fit":
if params['u0'][0]=="gri":
nuisance.update({'u0': node['u0']})
else:
nuisance.update({'u0': params['u0'][3].astype(np.float64)})
else:
fitted_param.update({'u0': params['u0'][3].astype(np.float64)})
result = np.append(result, fitted_param['u0'])
if params['tE'][0]!="fit":
if params['tE'][0]=="gri":
nuisance.update({'tE': node['tE']})
else:
nuisance.update({'tE': params['tE'][3].astype(np.float64)})
else:
fitted_param.update({'tE': params['tE'][3].astype(np.float64)})
result = np.append(result, fitted_param['tE'])
if params['rho'][0]!="fit":
if params['rho'][0]=="gri":
nuisance.update({'rho': node['rho']})
else:
nuisance.update({'rho': params['rho'][3].astype(np.float64)})
else:
fitted_param.update({'rho': params['rho'][3].astype(np.float64)})
result = np.append(result, fitted_param['rho'])
if params['gamma'][0]!="fit":
if params['gamma'][0]=="gri":
nuisance.update({'gamma': node['gamma']})
else:
nuisance.update({'gamma': params['gamma'][3].astype(np.float64)})
else:
fitted_param.update({'gamma': params['gamma'][3].astype(np.float64)})
result = np.append(result, fitted_param['gamma'])
if params['piEE'][0]!="fit":
if params['piEE'][0]=="gri":
nuisance.update({'piEE': node['piEE']})
else:
nuisance.update({'piEE': params['piEE'][3].astype(np.float64)})
else:
fitted_param.update({'piEE': params['piEE'][3].astype(np.float64)})
result = np.append(result, fitted_param['piEE'])
if params['piEN'][0]!="fit":
if params['piEN'][0]=="gri":
nuisance.update({'piEN': node['piEN']})
else:
nuisance.update({'piEN': params['piEN'][3].astype(np.float64)})
else:
fitted_param.update({'piEN': params['piEN'][3].astype(np.float64)})
result = np.append(result, fitted_param['piEN'])
if params['s'][0]!="fit":
if params['s'][0]=="gri":
nuisance.update({'s': node['s']})
else:
nuisance.update({'s': params['s'][3].astype(np.float64)})
else:
fitted_param.update({'s': params['s'][3].astype(np.float64)})
result = np.append(result, fitted_param['s'])
if params['q'][0]!="fit":
if params['q'][0]=="gri":
nuisance.update({'q': node['q']})
else:
nuisance.update({'q': params['q'][3].astype(np.float64)})
else:
fitted_param.update({'q': params['q'][3].astype(np.float64)})
result = np.append(result, fitted_param['q'])
if params['alpha'][0]!="fit":
if params['alpha'][0]=="gri":
nuisance.update({'alpha': node['alpha']})
else:
nuisance.update({'alpha': params['alpha'][3].astype(np.float64)})
else:
fitted_param.update({'alpha': params['alpha'][3].astype(np.float64)})
result = np.append(result, fitted_param['alpha'])
if params['dalpha'][0]!="fit":
if params['dalpha'][0]=="gri":
nuisance.update({'dalpha': node['dalpha']})
else:
nuisance.update({'dalpha': params['dalpha'][3].astype(np.float64)})
else:
fitted_param.update({'dalpha': params['dalpha'][3].astype(np.float64)})
result = np.append(result, fitted_param['dalpha'])
if params['ds'][0]!="fit":
if params['ds'][0]=="gri":
nuisance.update({'ds': node['ds']})
else:
nuisance.update({'ds': params['ds'][3].astype(np.float64)})
else:
fitted_param.update({'ds': params['ds'][3].astype(np.float64)})
result = np.append(result, fitted_param['ds'])
# Parameters of MCMC
        ndim, nwalkers = len(fitted_param), cfgsetup.getint('FitSetupDMCMC', 'Chains')  # Note: nwalkers must be an even number.
# pos = [result + 0.1*np.random.randn(ndim) for i in range(
# nwalkers)]
if fitted_param!={}:
            # Draw the initial walker positions uniformly within the specified interval
if cfgsetup.getboolean("FitSetupDMCMC", "Resume"):
if pos_pickle.shape[1] != len(fitted_param):
text = "\n\033[1m\033[91mThe number of fitted parameters does not fit in 'Resume' mode. muLAn killed.\033[0m"
sys.exit(text)
if flag_grid_yes==1:
text = "\n\033[1m\033[91m'Resume' mode not compatible with a grid. muLAn killed.\033[0m"
sys.exit(text)
pos = pos_pickle
rstate = rgenerator_piclke
else:
pos = ini_chains_gene(fitted_param, nwalkers, params)
rstate = None
# Sampler
if id_grid > 0:
del sampler
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob,
args=(time_serie, model_param, fitted_param, nuisance, model2load, interpol_method, cfgsetup.getfloat('Modelling', 'tb'), cfgsetup),
threads=cfgsetup.getint('FitSetupDMCMC', 'Threads'))
# ==============================================================
# RUN MCMC
# ==============================================================
chain_lengh = cfgsetup.getint('FitSetupDMCMC', 'ChainLength')
id_model = np.ones(nwalkers, dtype=np.int64)
for pos, lnprobc, rstate in sampler.sample(pos, rstate0=rstate, iterations=chain_lengh, storechain=False):
# text = 'MCMC: {:d} / {:d}'.format(id_model[0], chain_lengh)
# communicate(cfgsetup, 1, text)
if cfgsetup.getint("Modelling", "Verbose") >=3:
if flag_grid_yes:
update_progress_grid(id_model[0], chain_lengh, id_grid+1, lengh_grid)
else:
update_progress("Progression", id_model[0], chain_lengh)
accrate = sampler.acceptance_fraction
key_list = np.array([])
for key, value in fitted_param.iteritems():
key_list = np.append(key_list, key)
for i in range(nwalkers):
param_model = nuisance
id = 0
cond = (key_list=='t0')
if cond.sum()==1:
param_model.update({'t0' : pos[i][id]})
id=id+1
cond = (key_list=='u0')
if cond.sum()==1:
param_model.update({'u0' : pos[i][id]})
id=id+1
cond = (key_list=='tE')
if cond.sum()==1:
param_model.update({'tE' : pos[i][id]})
id=id+1
cond = (key_list=='rho')
if cond.sum()==1:
param_model.update({'rho' : pos[i][id]})
id=id+1
cond = (key_list=='gamma')
if cond.sum()==1:
param_model.update({'gamma' : pos[i][id]})
id=id+1
cond = (key_list=='piEE')
if cond.sum()==1:
param_model.update({'piEE' : pos[i][id]})
id=id+1
cond = (key_list=='piEN')
if cond.sum()==1:
param_model.update({'piEN' : pos[i][id]})
id=id+1
cond = (key_list=='s')
if cond.sum()==1:
param_model.update({'s' : pos[i][id]})
id=id+1
cond = (key_list=='q')
if cond.sum()==1:
param_model.update({'q' : pos[i][id]})
id=id+1
cond = (key_list=='alpha')
if cond.sum()==1:
param_model.update({'alpha' : pos[i][id]})
id=id+1
cond = (key_list == 'dalpha')
if cond.sum() == 1:
param_model.update({'dalpha': pos[i][id]})
id=id+1
cond = (key_list == 'ds')
if cond.sum() == 1:
param_model.update({'ds': pos[i][id]})
id=id+1
# id=id+1
if flag_grid_yes:
# Best model
if id_model[i]==1:
t0_best[i] = param_model['t0']
u0_best[i] = param_model['u0']
tE_best[i] = param_model['tE']
rho_best[i] = param_model['rho']
gamma_best[i] = param_model['gamma']
piEN_best[i] = param_model['piEN']
piEE_best[i] = param_model['piEE']
s_best[i] = param_model['s']
q_best[i] = param_model['q']
alpha_best[i] = param_model['alpha']
dalpha_best[i] = param_model['dalpha']
ds_best[i] = param_model['ds']
lnprob_best[i] = lnprobc[i]
accrate_best[i] = accrate[i]
date_best[i] = datetime.date.today().strftime("%Y%m%d")
hour_best[i] = datetime.datetime.utcnow().strftime("%H%M%S")
elif lnprobc[i]>lnprob_best[i]:
t0_best[i] = param_model['t0']
u0_best[i] = param_model['u0']
tE_best[i] = param_model['tE']
rho_best[i] = param_model['rho']
gamma_best[i] = param_model['gamma']
piEN_best[i] = param_model['piEN']
piEE_best[i] = param_model['piEE']
s_best[i] = param_model['s']
q_best[i] = param_model['q']
alpha_best[i] = param_model['alpha']
dalpha_best[i] = param_model['dalpha']
ds_best[i] = param_model['ds']
lnprob_best[i] = lnprobc[i]
accrate_best[i] = accrate[i]
date_best[i] = datetime.date.today().strftime("%Y%m%d")
hour_best[i] = datetime.datetime.utcnow().strftime("%H%M%S")
# Save Chains
filename4chains = path + cfgsetup.get('Controls', 'Archive')\
+ '-c{:04d}'.format(i) + '-g{:d}'.format(id_grid) + '.txt'
file_chains = open(filename4chains, 'a')
format4chains = '{:>10d} '.format(id_model[i])\
+ '{:+.10e} '.format(param_model['t0'])\
+ '{:+.10e} '.format(param_model['u0'])\
+ '{:+.10e} '.format(param_model['tE'])\
+ '{:+.10e} '.format(param_model['rho'])\
+ '{:+.10e} '.format(param_model['gamma'])\
+ '{:+.10e} '.format(param_model['piEN'])\
+ '{:+.10e} '.format(param_model['piEE'])\
+ '{:+.10e} '.format(param_model['s'])\
+ '{:+.10e} '.format(param_model['q'])\
+ '{:+.10e} '.format(param_model['alpha'])\
+ '{:+.10e} '.format(param_model['dalpha'])\
+ '{:+.10e} '.format(param_model['ds'])\
+ '{:+.10e} '.format(-2.0*lnprobc[i])\
+ '{:>7.3f} '.format(accrate[i])\
+ '{:8} '.format(datetime.date.today().strftime("%Y%m%d"))\
+ '{:6} '.format(datetime.datetime.utcnow().strftime("%H%M%S"))\
+ '{:+.10e}'.format(-2.0*lnprobc[i]/(len(time_serie['dates'])-len(fitted_param)-len(grid_params)))\
+ '\n'
file_chains.write(format4chains)
file_chains.close()
else:
if (len(accrate_loaded)>0) & (len(id_loaded)>0):
id_model_curr = int(id_model[i] + id_loaded[i])
accrate_curr = (1.0 * accrate[i] * id_model[i] + 1.0 * accrate_loaded[i] * id_loaded[i]) / id_model_curr
else:
id_model_curr = int(id_model[i])
accrate_curr = accrate[i]
path = cfgsetup.get('FullPaths', 'Event') + cfgsetup.get('RelativePaths', 'Chains')
filename4chains = path + cfgsetup.get('Controls', 'Archive')\
+ '-c{:04d}'.format(i) + '.txt'
file_chains = open(filename4chains, 'a')
format4chains = '{:>10d} '.format(id_model_curr)\
+ '{:+.10e} '.format(param_model['t0'])\
+ '{:+.10e} '.format(param_model['u0'])\
+ '{:+.10e} '.format(param_model['tE'])\
+ '{:+.10e} '.format(param_model['rho'])\
+ '{:+.10e} '.format(param_model['gamma'])\
+ '{:+.10e} '.format(param_model['piEN'])\
+ '{:+.10e} '.format(param_model['piEE'])\
+ '{:+.10e} '.format(param_model['s'])\
+ '{:+.10e} '.format(param_model['q'])\
+ '{:+.10e} '.format(param_model['alpha'])\
+ '{:+.10e} '.format(param_model['dalpha'])\
+ '{:+.10e} '.format(param_model['ds'])\
+ '{:+.10e} '.format(-2.0*lnprobc[i])\
+ '{:>7.3f} '.format(accrate_curr)\
+ '{:8} '.format(datetime.date.today().strftime("%Y%m%d"))\
+ '{:6} '.format(datetime.datetime.utcnow().strftime("%H%M%S"))\
+ '{:+.10e}'.format(-2.0*lnprobc[i]/(len(time_serie['dates'])-len(fitted_param)-len(grid_params)))\
+ '\n'
file_chains.write(format4chains)
file_chains.close()
id_model[i] = id_model[i] + 1
# Emergency Stop
file = open(cfgsetup.get('FullPaths', 'Event') + '.emergencystop', 'r')
stop = 0
for line in file:
if line.strip() == '1':
stop=1
file.close()
fn_lock = cfgsetup.get('FullPaths', 'Event') + '.lock'
if not os.path.exists(fn_lock): stop=1
# Record the last position
filename4pos = path + cfgsetup.get('Controls', 'Archive') + '-lastposition.p'
file_save = open(filename4pos, "w")
pickle.dump(pos, file_save)
file_save.close()
del pos
# Record the state of the pseudo-random generator
filename = path + cfgsetup.get('Controls', 'Archive') + '-rgenerator.p'
file_save = open(filename, "w")
pickle.dump(rstate, file_save)
file_save.close()
if stop==1:
break
# print("On continue")
# Save best model if grid
if flag_grid_yes:
for i in range(nwalkers):
path = cfgsetup.get('FullPaths', 'Event') + cfgsetup.get('RelativePaths', 'Chains')
filename4chains = path + cfgsetup.get('Controls', 'Archive')\
+ '-c{:04d}'.format(i) + '.txt'
file_chains = open(filename4chains, 'a')
format4chains = '{:>10d} '.format(id_grid+1)\
+ '{:+.10e} '.format(t0_best[i])\
+ '{:+.10e} '.format(u0_best[i])\
+ '{:+.10e} '.format(tE_best[i])\
+ '{:+.10e} '.format(rho_best[i])\
+ '{:+.10e} '.format(gamma_best[i])\
+ '{:+.10e} '.format(piEN_best[i])\
+ '{:+.10e} '.format(piEE_best[i])\
+ '{:+.10e} '.format(s_best[i])\
+ '{:+.10e} '.format(q_best[i])\
+ '{:+.10e} '.format(alpha_best[i])\
+ '{:+.10e} '.format(dalpha_best[i])\
+ '{:+.10e} '.format(ds_best[i]) \
+ '{:+.10e} '.format(-2.0*lnprob_best[i])\
+ '{:>7.3f} '.format(accrate[i])\
+ '{:8} '.format(date_best[i])\
+ '{:6} '.format(hour_best[i])\
+ '{:+.10e}'.format(-2.0*lnprobc[i]/(len(time_serie['dates'])-len(fitted_param)-len(grid_params)))\
+ '\n'
file_chains.write(format4chains)
file_chains.close()
if stop==1:
break
# Create an archive for each MCMC on the grid
if flag_grid_yes:
path_event = cfgsetup.get('FullPaths', 'Event')
path = path_event + cfgsetup.get('RelativePaths', 'Chains')\
+ '{:d}'.format(id_grid) + '/'
os.makedirs(path)
text = 'cp ' + path_event + cfgsetup.get('RelativePaths', 'Chains')\
+ '*g' + '{:d}'.format(id_grid) + '* ' + path
bash_command(text)
text = 'cp ' + path_event + cfgsetup.get('RelativePaths', 'Chains')\
+ '*.p ' + path
bash_command(text)
shutil.make_archive(path, 'zip', path)
shutil.rmtree(path)
text = 'rm ' + path_event + cfgsetup.get('RelativePaths', 'Chains')\
+ '*g' + '{:d}'.format(id_grid) + '* '
bash_command(text)
else:
stop = 0
for i in range(nwalkers):
param_model = nuisance
# Calculation of the amplification
observatories = np.unique(time_serie['obs'])
models_lib = np.unique(time_serie['model'])
for jjj in range(len(observatories)):
cond2 = (time_serie['obs']==observatories[jjj])
for iii in range(models_lib.shape[0]):
cond = (time_serie['model'] == models_lib[iii]) & (time_serie['obs']==observatories[jjj])
if cond.sum() > 0:
time_serie_export = time_serie['dates'][cond]
DsN_export = time_serie['DsN'][cond]
DsE_export = time_serie['DsE'][cond]
Ds_export = dict({'N':DsN_export, 'E':DsE_export})
try:
kwargs_method = dict(cfgsetup.items(models_lib[iii]))
except:
kwargs_method = dict()
amp = models[models_lib[iii]].magnifcalc(time_serie_export, param_model, Ds=Ds_export, tb=cfgsetup.getfloat('Modelling', 'tb'), **kwargs_method)
time_serie['amp'][cond] = amp
del amp
# Calculation of fs and fb
# fs, fb = algebra.fsfb(time_serie, cond2, blending=True)
fs, fb = algebra.fsfbwsig(time_serie, cond2, blending=True)
time_serie['fs'][cond2] = fs
time_serie['fb'][cond2] = fb
# Calculation of chi2
time_serie['flux_model'] = time_serie['amp']*time_serie['fs'] + time_serie['fb']
time_serie['chi2pp'] = np.power((time_serie['flux']-time_serie['flux_model'])/time_serie['err_flux'], 2)
chi2_ini = np.sum(time_serie['chi2pp'])
path = cfgsetup.get('FullPaths', 'Event') + cfgsetup.get('RelativePaths', 'Chains')
filename4chains = path + cfgsetup.get('Controls', 'Archive')\
+ '-c{:04d}'.format(i) + '.txt'
file_chains = open(filename4chains, 'a')
format4chains = '{:>10d} '.format(0)\
+ '{:+.10e} '.format(param_model['t0'])\
+ '{:+.10e} '.format(param_model['u0'])\
+ '{:+.10e} '.format(param_model['tE'])\
+ '{:+.10e} '.format(param_model['rho'])\
+ '{:+.10e} '.format(param_model['gamma'])\
+ '{:+.10e} '.format(param_model['piEN'])\
+ '{:+.10e} '.format(param_model['piEE'])\
+ '{:+.10e} '.format(param_model['s'])\
+ '{:+.10e} '.format(param_model['q'])\
+ '{:+.10e} '.format(param_model['alpha'])\
+ '{:+.10e} '.format(param_model['dalpha'])\
+ '{:+.10e} '.format(param_model['ds'])\
+ '{:+.10e} '.format(chi2_ini)\
+ '{:>7.3f} '.format(0.0)\
+ '{:8} '.format(datetime.date.today().strftime("%Y%m%d"))\
+ '{:6} '.format(datetime.datetime.utcnow().strftime("%H%M%S"))\
+ '{:+.10e}'.format(chi2_ini/len(time_serie['dates']))\
+ '\n'
file_chains.write(format4chains)
file_chains.close()
try:
del lnprobc
del rstate
except:
pass
# Create an archive
path_code = cfgsetup.get('FullPaths', 'Code')
path_event = cfgsetup.get('FullPaths', 'Event')
path_arch = path_event + cfgsetup.get('RelativePaths', 'Archives')
if os.path.exists(path_arch + cfgsetup.get('Controls', 'Archive')):
shutil.rmtree(path_arch + cfgsetup.get('Controls', 'Archive'))
dir = path_event + cfgsetup.get('RelativePaths', 'Chains')
shutil.copytree(dir, path_arch + cfgsetup.get('Controls', 'Archive') + "/" + cfgsetup.get('RelativePaths', 'Chains'))
dir = path_event + cfgsetup.get('RelativePaths', 'Data')
shutil.copytree(dir, path_arch + cfgsetup.get('Controls', 'Archive') + "/" + cfgsetup.get('RelativePaths', 'Data'))
file = path_event + "mulan.py"
shutil.copyfile(file, path_arch + cfgsetup.get('Controls', 'Archive') + "/mulan.py")
file = path_event + "observatories.ini"
shutil.copyfile(file, path_arch + cfgsetup.get('Controls', 'Archive') + "/observatories.ini")
file = path_event + "setup.ini"
shutil.copyfile(file, path_arch + cfgsetup.get('Controls', 'Archive') + "/setup.ini")
file = path_event + "advancedsetup.ini"
shutil.copyfile(file, path_arch + cfgsetup.get('Controls', 'Archive') + "/advancedsetup.ini")
dir = path_arch + cfgsetup.get('Controls', 'Archive') + "/" + cfgsetup.get('RelativePaths', 'Plots')
os.makedirs(dir)
try:
shutil.rmtree(path_arch + cfgsetup.get('Controls', 'Archive') + "/" + cfgsetup.get('Controls', 'Archive') + ".zip")
except:
        UserWarning("A ZIP file already exists. Archive not created.")
filename = path_arch + cfgsetup.get('Controls', 'Archive')
shutil.make_archive(filename, 'zip', filename)
shutil.rmtree(filename)
text = "Create archive {0:}".format(cfgsetup.get('Controls', 'Archive'))
communicate(cfgsetup, 3, text, opts=False, prefix=False, newline=False, tab=True)
# if os.path.exists(path_arch + cfgsetup.get('Controls', 'Archive') + '.zip'):
# os.remove(path_arch + cfgsetup.get('Controls', 'Archive') + '.zip')
# shutil.move(filename + '.zip', path_arch)
# Free memory
try:
del id_model, param_model, cond, id, accrate, key_list
del value, chain_lengh, sampler, ndim, nwalkers
del fitted_param, nuisance, result, params, grid_params
del nb_params_grid
except:
pass
if stop==1:
sys.exit("\nProcess stopped by the user.\n")
| mit |
fcchou/HelixMC | examples/bp_database/bp_database.py | 1 | 4804 | import tempfile
import subprocess
import gzip
import urllib
import os.path
import os
import glob
import numpy as np
import scipy.cluster.vq as vq
import matplotlib.pyplot as plt
# This example demonstrate how the database file
# database/DNA_2.0_noprot.npz is curated.
# Download PDBs #
pdb_list = '../../helixmc/data/pdb_list/DNA_2.0_noprot.txt'
server_name = 'http://www.rcsb.org/pdb/files/'
for pdb_id in open(pdb_list):
filename = '%s.pdb.gz' % pdb_id[:-1].lower()
filename_pdb = '%s.pdb' % pdb_id[:-1].lower()
addr = '%s%s' % (server_name, filename)
if (not os.path.exists(filename) and
not os.path.exists(filename_pdb)):
print 'Downloading', filename
urllib.urlretrieve(addr, filename) # Download the files
    # Uncompress the downloaded file
if os.path.exists(filename):
f = gzip.open(filename)
out = open(filename_pdb, 'w')
out.write(f.read())
out.close()
f.close()
os.remove(filename)
# Run 3DNA on the pdb files (requires 3DNA to be installed) #
def run_3dna(name):
temp = tempfile.NamedTemporaryFile()
subprocess.check_call('find_pair %s %s' % (name, temp.name), shell=True)
try:
subprocess.check_call('analyze %s' % temp.name, shell=True)
except:
pass
for filename in glob.glob('*.pdb'):
if not os.path.exists(filename.replace('.pdb', '.out')):
run_3dna(filename)
# Load the bp-step data from 3DNA output #
seq_list = [
'AA', 'AT', 'AG', 'AC', 'GA', 'GC', 'GT', 'GG', 'TT',
'TA', 'TC', 'TG', 'CC', 'CG', 'CA', 'CT'
]
params = [[] for i in xrange(16)]
for out_3dna in glob.glob('*.out'):
is_readline1 = False
is_readline2 = False
wc_bp = []
for line in open(out_3dna):
if is_readline1:
if len(line) < 4 or line[:4] == '****':
is_readline1 = False
else:
bp_num = int(line[:4])
bp_tag = line[34:39]
if bp_tag in ['-----', 'x----', '----x']:
wc_bp.append(bp_num)
if 'Strand I Strand II' in line:
is_readline1 = True
if is_readline2:
if ('~~~~' in line) or ('****' in line):
break
if ('----' not in line):
bp_num = int(line[:4])
if bp_num in wc_bp and (bp_num + 1) in wc_bp:
try:
bp_type = line[5:7].upper()
idx = seq_list.index(bp_type)
params[idx].append(np.fromstring(line[10:], sep=' '))
except:
pass
if 'Shift Slide' in line:
is_readline2 = True
# data filtering #
params_filtered = []
for params_i in params:
params_i = np.array(params_i)
params_i = params_i[params_i[:, 5] > 5]
params_i = params_i[params_i[:, 2] < 5.5]
data_avg = np.average(params_i, axis=0)
data_std = np.std(params_i, axis=0)
data_min = data_avg - 4 * data_std
data_max = data_avg + 4 * data_std
params_i = params_i[np.all(params_i >= data_min[np.newaxis, :], axis=1)]
params_i = params_i[np.all(params_i <= data_max[np.newaxis, :], axis=1)]
params_filtered.append(params_i)
params_name = ['shift', 'slide', 'rise', 'tilt', 'roll', 'twist']
all_params = np.vstack(params_filtered) # list without sequence dependence
# Plot the histogram before clustering #
fig = plt.figure(figsize=(12, 8))
for i in xrange(6):
subplt = fig.add_subplot(2, 3, i+1)
plt.hist(all_params[:, i], 30, histtype='step')
plt.title(params_name[i])
# Clustering #
n_cluster = 3
whitened = vq.whiten(all_params)
avg1 = np.average(whitened[:, 1])
whitened[:, 1] = (whitened[:, 1] - avg1) * 1.2 + avg1
centr, f = vq.kmeans(whitened, n_cluster)
reject_cluster = centr[centr[:, 1] < -1]  # the cluster whose center has whitened slide < -1 is A-DNA
acpt_cluster = centr[centr[:, 1] >= -1]  # the other two clusters are B-DNA
centr = np.vstack((reject_cluster, acpt_cluster))
# Plot after clustering #
idx, _ = vq.vq(whitened, centr)
fig = plt.figure(figsize=(12, 8))
for i in xrange(6):
subplt = fig.add_subplot(2, 3, i+1)
for j in xrange(n_cluster):
plt.hist(all_params[idx == j][:, i], 30, histtype='step')
plt.title(params_name[i])
# Remove the A-DNA cluster from the sequence-dependent params #
start = 0
final_params = []
for params in params_filtered:
end = start + params.shape[0]
new_idx = idx[start:end]
params = params[new_idx != 0]
params[:, 3:] = np.radians(params[:, 3:]) # Convert angles to radians
final_params.append(params)
start = end
# Output the database file #
expr = "np.savez( 'DNA_2.0_noprot',"
for i in xrange(16):
expr += " %s = final_params[%d], " % (seq_list[i], i)
expr += ')'
eval(expr)
plt.show()
| gpl-3.0 |
wzbozon/scikit-learn | sklearn/preprocessing/label.py | 137 | 27165 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Joel Nothman <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import column_or_1d
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
if diff:
raise ValueError("y contains new labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
        utils.multiclass.type_of_target. Possible types are 'continuous',
        'continuous-multioutput', 'binary', 'multiclass',
        'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.
multilabel_ : boolean
True if the transformer was fitted on a multilabel rather than a
multiclass set of labels. The ``multilabel_`` attribute is deprecated
and will be removed in 0.18
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
indicator_matrix_ : str
        'sparse' when the input data to transform is a multilabel-indicator and
is sparse, None otherwise. The ``indicator_matrix_`` attribute is
deprecated as of version 0.16 and will be removed in 0.18
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
            which represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
        The output of transform is sometimes referred to as the
1-of-K coding scheme.
Parameters
----------
y : numpy array or sparse matrix of shape (n_samples,) or
(n_samples, n_classes) Target values. The 2-d matrix should only
            contain 0 and 1, which represents multilabel classification. Sparse
matrix can be CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
check_is_fitted(self, 'classes_')
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
        greatest value. Typically, this allows using the output of a
linear model's decision_function method directly as the input
of inverse_transform.
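        Examples
        --------
        A small illustration (the score values are arbitrary): for a
        multiclass target the class with the largest score is returned, so
        the threshold plays no role in that case.
        >>> import numpy as np
        >>> from sklearn.preprocessing import LabelBinarizer
        >>> lb = LabelBinarizer().fit([1, 2, 6, 4, 2])
        >>> scores = np.array([[-1., 2., .5, -3.], [.1, -2., -1., 3.]])
        >>> lb.inverse_transform(scores, threshold=0.)
        array([2, 6])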
"""
check_is_fitted(self, 'classes_')
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
if y_type == 'unknown':
raise ValueError("The type of target data is not known")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if len(classes) == 1:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
else:
raise ValueError("%s target data is not supported with label "
"binarization" % y_type)
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
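        Examples
        --------
        A short illustration with a small, arbitrary indicator matrix:
        >>> import numpy as np
        >>> from sklearn.preprocessing import MultiLabelBinarizer
        >>> mlb = MultiLabelBinarizer().fit([(1, 2), (3,)])
        >>> mlb.classes_
        array([1, 2, 3])
        >>> mlb.inverse_transform(np.array([[0, 1, 1], [1, 0, 0]]))
        [(2, 3), (1,)]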
"""
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
| bsd-3-clause |
saiwing-yeung/scikit-learn | sklearn/utils/multiclass.py | 40 | 12966 |
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
from ..utils.fixes import array_equal
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
          because there are no explicit labels
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
Return ``True``, if ``y`` is in a multilabel format, else ```False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.unique(y.data).size == 1 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def check_classification_targets(y):
"""Ensure that target y is of a non-regression type.
Only the following target types (as defined in type_of_target) are allowed:
'binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences'
Parameters
----------
y : array-like
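    Examples
    --------
    A quick illustration: discrete targets pass silently, while continuous
    targets are rejected.
    >>> from sklearn.utils.multiclass import check_classification_targets
    >>> check_classification_targets([1, 0, 1, 1])
    >>> check_classification_targets([0.2, 0.7, 0.5])
    Traceback (most recent call last):
        ...
    ValueError: Unknown label type: 'continuous'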
"""
y_type = type_of_target(y)
if y_type not in ['binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences']:
raise ValueError("Unknown label type: %r" % y_type)
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not array_equal(clf.classes_, unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
n_classes : list of integers of size n_outputs
Number of classes in each column
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
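    Examples
    --------
    For instance, with two output columns and no sample weights:
    >>> import numpy as np
    >>> from sklearn.utils.multiclass import class_distribution
    >>> classes, n_classes, class_prior = class_distribution(
    ...     np.array([[1, 0], [2, 0], [2, 1]]))
    >>> classes
    [array([1, 2]), array([0, 1])]
    >>> n_classes
    [2, 2]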
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
# An explicit zero was found, combine its weight with the weight
# of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
# class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
| bsd-3-clause |
dfm/python-fsps | demos/dao69.py | 5 | 1598 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (division, print_function, absolute_import,
unicode_literals)
import pyfits
import numpy as np
import matplotlib.pyplot as pl
import fsps
# Measurements of cluster parameters.
tage = 10. ** (8.04 - 9)
logmass = 4.09
dist_mod = 24.5
# Set up the stellar population model.
sp = fsps.StellarPopulation(imf_type=2, dust_type=1, mwr=3.1, dust2=0.3)
# The measured magnitudes from the literature.
data = {"wfc3_f160w": 16.386,
"wfc3_f275w": 17.398,
"wfc_acs_f814w": 17.155}
# There are also a few filters that we have data for but aren't included in
# the standard FSPS install:
# "F110W": 16.877,
# "F336W": 17.349,
# "F475W": 17.762,
# Load the observed spectrum.
f = pyfits.open("DAO69.fits")
obs_spec = np.array(f[0].data, dtype=float)
f.close()
obs_spec /= 5e-20
# The observed wavelength grid in the data is magically this:
obs_lambda = np.arange(0, 4540) * 1.2 + 3700
# Compute the model magnitudes.
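# (Assumed here: get_mags is normalized to one solar mass, so -2.5 * logmass
# rescales to the measured cluster mass and adding dist_mod converts the
# absolute magnitude to an apparent one.)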
for b, v in data.items():
print(b, v, sp.get_mags(zmet=20, tage=tage, band=b) - 2.5 * logmass
+ dist_mod)
# Compute the model spectrum in ``L_sun / A``.
lam, spec = sp.get_spectrum(zmet=20, tage=tage, peraa=True)
spec *= 3.839e33 * 10. ** (logmass - dist_mod / 2.5)
f = 1.0 # obs_spec[0, obs_lambda < 5000.][-1] / spec[lam < 5000.][-1]
print(obs_spec[0, obs_lambda < 5000.][-1] / spec[lam < 5000.][-1])
pl.loglog(obs_lambda, obs_spec[0], "k")
pl.loglog(lam, spec * f, "r")
pl.xlim(3700, 7000)
# pl.ylim(10 ** 3, 10 ** 4.5)
pl.savefig("spectrum.png")
| mit |
idlead/scikit-learn | examples/exercises/plot_iris_exercise.py | 323 | 1602 | """
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(float)
n_train = int(.9 * n_sample)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
| bsd-3-clause |
Adai0808/BuildingMachineLearningSystemsWithPython | ch05/PosTagFreqVectorizer.py | 27 | 9486 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import re
from operator import itemgetter
from collections import Mapping
import scipy.sparse as sp
from sklearn.base import BaseEstimator
from sklearn.feature_extraction.text import strip_accents_ascii, strip_accents_unicode
import nltk
from collections import Counter
try:
import ujson as json # UltraJSON if available
except:
import json
poscache_filename = "poscache.json"
class PosCounter(Counter):
def __init__(self, iterable=(), normalize=True, poscache=None, **kwargs):
self.n_sents = 0
self.normalize = normalize
self.poscache = poscache
super(PosCounter, self).__init__(iterable, **kwargs)
def update(self, other):
"""Adds counts for elements in other"""
if isinstance(other, self.__class__):
self.n_sents += other.n_sents
for x, n in other.items():
self[x] += n
else:
for sent in other:
self.n_sents += 1
if self.poscache is not None:
if sent in self.poscache:
tags = self.poscache[sent]
else:
self.poscache[sent] = tags = nltk.pos_tag(
nltk.word_tokenize(sent))
else:
tags = nltk.pos_tag(nltk.word_tokenize(sent))
for x in tags:
tok, tag = x
self[tag] += 1
if self.normalize:
for x, n in self.items():
self[x] /= float(self.n_sents)
class PosTagFreqVectorizer(BaseEstimator):
"""
    Convert a collection of raw documents to a matrix of POS tag frequencies
"""
def __init__(self, input='content', charset='utf-8',
charset_error='strict', strip_accents=None,
vocabulary=None,
normalize=True,
dtype=float):
self.input = input
self.charset = charset
self.charset_error = charset_error
self.strip_accents = strip_accents
if vocabulary is not None:
self.fixed_vocabulary = True
if not isinstance(vocabulary, Mapping):
vocabulary = dict((t, i) for i, t in enumerate(vocabulary))
self.vocabulary_ = vocabulary
else:
self.fixed_vocabulary = False
try:
self.poscache = json.load(open(poscache_filename, "r"))
except IOError:
self.poscache = {}
self.normalize = normalize
self.dtype = dtype
def write_poscache(self):
json.dump(self.poscache, open(poscache_filename, "w"))
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
doc = open(doc, 'rb').read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.charset, self.charset_error)
return doc
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
        # number of functions. However the overhead of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif hasattr(self.strip_accents, '__call__'):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
only_prose = lambda s: re.sub('<[^>]*>', '', s).replace("\n", " ")
return lambda x: strip_accents(only_prose(x))
def build_tokenizer(self):
"""Return a function that split a string in sequence of tokens"""
return nltk.sent_tokenize
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
preprocess = self.build_preprocessor()
tokenize = self.build_tokenizer()
return lambda doc: tokenize(preprocess(self.decode(doc)))
def _term_count_dicts_to_matrix(self, term_count_dicts):
i_indices = []
j_indices = []
values = []
vocabulary = self.vocabulary_
for i, term_count_dict in enumerate(term_count_dicts):
for term, count in term_count_dict.items():
j = vocabulary.get(term)
if j is not None:
i_indices.append(i)
j_indices.append(j)
values.append(count)
# free memory as we go
term_count_dict.clear()
shape = (len(term_count_dicts), max(vocabulary.values()) + 1)
spmatrix = sp.csr_matrix((values, (i_indices, j_indices)),
shape=shape, dtype=self.dtype)
return spmatrix
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents
Parameters
----------
raw_documents: iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return the count vectors
This is more efficient than calling fit followed by transform.
Parameters
----------
raw_documents: iterable
an iterable which yields either str, unicode or file objects
Returns
-------
vectors: array, [n_samples, n_features]
"""
if self.fixed_vocabulary:
# No need to fit anything, directly perform the transformation.
            # We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer
analyze = self.build_analyzer()
term_counts_per_doc = [PosCounter(analyze(doc), normalize=self.normalize, poscache=self.poscache)
for doc in raw_documents]
return self._term_count_dicts_to_matrix(term_counts_per_doc)
self.vocabulary_ = {}
# result of document conversion to term count dicts
term_counts_per_doc = []
term_counts = Counter()
analyze = self.build_analyzer()
for doc in raw_documents:
term_count_current = PosCounter(
analyze(doc), normalize=self.normalize, poscache=self.poscache)
term_counts.update(term_count_current)
term_counts_per_doc.append(term_count_current)
self.write_poscache()
terms = set(term_counts)
# store map from term name to feature integer index: we sort the term
# to have reproducible outcome for the vocabulary structure: otherwise
# the mapping from feature name to indices might depend on the memory
# layout of the machine. Furthermore sorted terms might make it
# possible to perform binary search in the feature names array.
self.vocabulary_ = dict(((t, i) for i, t in enumerate(sorted(terms))))
return self._term_count_dicts_to_matrix(term_counts_per_doc)
def transform(self, raw_documents):
"""Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided in the constructor.
Parameters
----------
raw_documents: iterable
an iterable which yields either str, unicode or file objects
Returns
-------
vectors: sparse matrix, [n_samples, n_features]
"""
if not hasattr(self, 'vocabulary_') or len(self.vocabulary_) == 0:
raise ValueError("Vocabulary wasn't fitted or is empty!")
# raw_documents can be an iterable so we don't know its size in
# advance
# XXX @larsmans tried to parallelize the following loop with joblib.
# The result was some 20% slower than the serial version.
analyze = self.build_analyzer()
term_counts_per_doc = [Counter(analyze(doc)) for doc in raw_documents]
return self._term_count_dicts_to_matrix(term_counts_per_doc)
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
if not hasattr(self, 'vocabulary_') or len(self.vocabulary_) == 0:
raise ValueError("Vocabulary wasn't fitted or is empty!")
return [t for t, i in sorted(iter(self.vocabulary_.items()),
key=itemgetter(1))]
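if __name__ == "__main__":
    # Minimal usage sketch (illustrative only). It assumes the NLTK sentence
    # tokenizer and POS tagger models have already been downloaded with
    # nltk.download(); the documents below are made up.
    example_docs = ["<p>The cat sat on the mat. It purred.</p>",
                    "<p>Dogs bark loudly!</p>"]
    vect = PosTagFreqVectorizer(normalize=True)
    freqs = vect.fit_transform(example_docs)
    print(vect.get_feature_names())
    print(freqs.toarray())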
| mit |
pydata/xarray | xarray/tests/test_formatting.py | 1 | 18141 | import sys
from textwrap import dedent
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from xarray.core import formatting
from . import requires_netCDF4
class TestFormatting:
def test_get_indexer_at_least_n_items(self):
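        # Each case is (array shape, expected indexer for at least the first
        # 10 items, expected indexer for at least the last 10 items).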
cases = [
((20,), (slice(10),), (slice(-10, None),)),
((3, 20), (0, slice(10)), (-1, slice(-10, None))),
((2, 10), (0, slice(10)), (-1, slice(-10, None))),
((2, 5), (slice(2), slice(None)), (slice(-2, None), slice(None))),
((1, 2, 5), (0, slice(2), slice(None)), (-1, slice(-2, None), slice(None))),
((2, 3, 5), (0, slice(2), slice(None)), (-1, slice(-2, None), slice(None))),
(
(1, 10, 1),
(0, slice(10), slice(None)),
(-1, slice(-10, None), slice(None)),
),
(
(2, 5, 1),
(slice(2), slice(None), slice(None)),
(slice(-2, None), slice(None), slice(None)),
),
((2, 5, 3), (0, slice(4), slice(None)), (-1, slice(-4, None), slice(None))),
(
(2, 3, 3),
(slice(2), slice(None), slice(None)),
(slice(-2, None), slice(None), slice(None)),
),
]
for shape, start_expected, end_expected in cases:
actual = formatting._get_indexer_at_least_n_items(shape, 10, from_end=False)
assert start_expected == actual
actual = formatting._get_indexer_at_least_n_items(shape, 10, from_end=True)
assert end_expected == actual
def test_first_n_items(self):
array = np.arange(100).reshape(10, 5, 2)
for n in [3, 10, 13, 100, 200]:
actual = formatting.first_n_items(array, n)
expected = array.flat[:n]
assert (expected == actual).all()
with pytest.raises(ValueError, match=r"at least one item"):
formatting.first_n_items(array, 0)
def test_last_n_items(self):
array = np.arange(100).reshape(10, 5, 2)
for n in [3, 10, 13, 100, 200]:
actual = formatting.last_n_items(array, n)
expected = array.flat[-n:]
assert (expected == actual).all()
with pytest.raises(ValueError, match=r"at least one item"):
formatting.first_n_items(array, 0)
def test_last_item(self):
array = np.arange(100)
reshape = ((10, 10), (1, 100), (2, 2, 5, 5))
expected = np.array([99])
for r in reshape:
result = formatting.last_item(array.reshape(r))
assert result == expected
def test_format_item(self):
cases = [
(pd.Timestamp("2000-01-01T12"), "2000-01-01T12:00:00"),
(pd.Timestamp("2000-01-01"), "2000-01-01"),
(pd.Timestamp("NaT"), "NaT"),
(pd.Timedelta("10 days 1 hour"), "10 days 01:00:00"),
(pd.Timedelta("-3 days"), "-3 days +00:00:00"),
(pd.Timedelta("3 hours"), "0 days 03:00:00"),
(pd.Timedelta("NaT"), "NaT"),
("foo", "'foo'"),
(b"foo", "b'foo'"),
(1, "1"),
(1.0, "1.0"),
(np.float16(1.1234), "1.123"),
(np.float32(1.0111111), "1.011"),
(np.float64(22.222222), "22.22"),
]
for item, expected in cases:
actual = formatting.format_item(item)
assert expected == actual
def test_format_items(self):
cases = [
(np.arange(4) * np.timedelta64(1, "D"), "0 days 1 days 2 days 3 days"),
(
np.arange(4) * np.timedelta64(3, "h"),
"00:00:00 03:00:00 06:00:00 09:00:00",
),
(
np.arange(4) * np.timedelta64(500, "ms"),
"00:00:00 00:00:00.500000 00:00:01 00:00:01.500000",
),
(pd.to_timedelta(["NaT", "0s", "1s", "NaT"]), "NaT 00:00:00 00:00:01 NaT"),
(
pd.to_timedelta(["1 day 1 hour", "1 day", "0 hours"]),
"1 days 01:00:00 1 days 00:00:00 0 days 00:00:00",
),
([1, 2, 3], "1 2 3"),
]
for item, expected in cases:
actual = " ".join(formatting.format_items(item))
assert expected == actual
def test_format_array_flat(self):
actual = formatting.format_array_flat(np.arange(100), 2)
expected = "..."
assert expected == actual
actual = formatting.format_array_flat(np.arange(100), 9)
expected = "0 ... 99"
assert expected == actual
actual = formatting.format_array_flat(np.arange(100), 10)
expected = "0 1 ... 99"
assert expected == actual
actual = formatting.format_array_flat(np.arange(100), 13)
expected = "0 1 ... 98 99"
assert expected == actual
actual = formatting.format_array_flat(np.arange(100), 15)
expected = "0 1 2 ... 98 99"
assert expected == actual
# NB: Probably not ideal; an alternative would be cutting after the
# first ellipsis
actual = formatting.format_array_flat(np.arange(100.0), 11)
expected = "0.0 ... ..."
assert expected == actual
actual = formatting.format_array_flat(np.arange(100.0), 12)
expected = "0.0 ... 99.0"
assert expected == actual
actual = formatting.format_array_flat(np.arange(3), 5)
expected = "0 1 2"
assert expected == actual
actual = formatting.format_array_flat(np.arange(4.0), 11)
expected = "0.0 ... 3.0"
assert expected == actual
actual = formatting.format_array_flat(np.arange(0), 0)
expected = ""
assert expected == actual
actual = formatting.format_array_flat(np.arange(1), 1)
expected = "0"
assert expected == actual
actual = formatting.format_array_flat(np.arange(2), 3)
expected = "0 1"
assert expected == actual
actual = formatting.format_array_flat(np.arange(4), 7)
expected = "0 1 2 3"
assert expected == actual
actual = formatting.format_array_flat(np.arange(5), 7)
expected = "0 ... 4"
assert expected == actual
long_str = [" ".join(["hello world" for _ in range(100)])]
actual = formatting.format_array_flat(np.asarray([long_str]), 21)
expected = "'hello world hello..."
assert expected == actual
def test_pretty_print(self):
assert formatting.pretty_print("abcdefghij", 8) == "abcde..."
assert formatting.pretty_print("ß", 1) == "ß"
def test_maybe_truncate(self):
assert formatting.maybe_truncate("ß", 10) == "ß"
def test_format_timestamp_out_of_bounds(self):
from datetime import datetime
date = datetime(1300, 12, 1)
expected = "1300-12-01"
result = formatting.format_timestamp(date)
assert result == expected
date = datetime(2300, 12, 1)
expected = "2300-12-01"
result = formatting.format_timestamp(date)
assert result == expected
def test_attribute_repr(self):
short = formatting.summarize_attr("key", "Short string")
long = formatting.summarize_attr("key", 100 * "Very long string ")
newlines = formatting.summarize_attr("key", "\n\n\n")
tabs = formatting.summarize_attr("key", "\t\t\t")
assert short == " key: Short string"
assert len(long) <= 80
assert long.endswith("...")
assert "\n" not in newlines
assert "\t" not in tabs
def test_diff_array_repr(self):
da_a = xr.DataArray(
np.array([[1, 2, 3], [4, 5, 6]], dtype="int64"),
dims=("x", "y"),
coords={
"x": np.array(["a", "b"], dtype="U1"),
"y": np.array([1, 2, 3], dtype="int64"),
},
attrs={"units": "m", "description": "desc"},
)
da_b = xr.DataArray(
np.array([1, 2], dtype="int64"),
dims="x",
coords={
"x": np.array(["a", "c"], dtype="U1"),
"label": ("x", np.array([1, 2], dtype="int64")),
},
attrs={"units": "kg"},
)
byteorder = "<" if sys.byteorder == "little" else ">"
expected = dedent(
"""\
Left and right DataArray objects are not identical
Differing dimensions:
(x: 2, y: 3) != (x: 2)
Differing values:
L
array([[1, 2, 3],
[4, 5, 6]], dtype=int64)
R
array([1, 2], dtype=int64)
Differing coordinates:
L * x (x) %cU1 'a' 'b'
R * x (x) %cU1 'a' 'c'
Coordinates only on the left object:
* y (y) int64 1 2 3
Coordinates only on the right object:
label (x) int64 1 2
Differing attributes:
L units: m
R units: kg
Attributes only on the left object:
description: desc"""
% (byteorder, byteorder)
)
actual = formatting.diff_array_repr(da_a, da_b, "identical")
try:
assert actual == expected
except AssertionError:
# depending on platform, dtype may not be shown in numpy array repr
assert actual == expected.replace(", dtype=int64", "")
va = xr.Variable(
"x", np.array([1, 2, 3], dtype="int64"), {"title": "test Variable"}
)
vb = xr.Variable(("x", "y"), np.array([[1, 2, 3], [4, 5, 6]], dtype="int64"))
expected = dedent(
"""\
Left and right Variable objects are not equal
Differing dimensions:
(x: 3) != (x: 2, y: 3)
Differing values:
L
array([1, 2, 3], dtype=int64)
R
array([[1, 2, 3],
[4, 5, 6]], dtype=int64)"""
)
actual = formatting.diff_array_repr(va, vb, "equals")
try:
assert actual == expected
except AssertionError:
assert actual == expected.replace(", dtype=int64", "")
@pytest.mark.filterwarnings("error")
def test_diff_attrs_repr_with_array(self):
attrs_a = {"attr": np.array([0, 1])}
attrs_b = {"attr": 1}
expected = dedent(
"""\
Differing attributes:
L attr: [0 1]
R attr: 1
"""
).strip()
actual = formatting.diff_attrs_repr(attrs_a, attrs_b, "equals")
assert expected == actual
attrs_b = {"attr": np.array([-3, 5])}
expected = dedent(
"""\
Differing attributes:
L attr: [0 1]
R attr: [-3 5]
"""
).strip()
actual = formatting.diff_attrs_repr(attrs_a, attrs_b, "equals")
assert expected == actual
# should not raise a warning
attrs_b = {"attr": np.array([0, 1, 2])}
expected = dedent(
"""\
Differing attributes:
L attr: [0 1]
R attr: [0 1 2]
"""
).strip()
actual = formatting.diff_attrs_repr(attrs_a, attrs_b, "equals")
assert expected == actual
def test_diff_dataset_repr(self):
ds_a = xr.Dataset(
data_vars={
"var1": (("x", "y"), np.array([[1, 2, 3], [4, 5, 6]], dtype="int64")),
"var2": ("x", np.array([3, 4], dtype="int64")),
},
coords={
"x": np.array(["a", "b"], dtype="U1"),
"y": np.array([1, 2, 3], dtype="int64"),
},
attrs={"units": "m", "description": "desc"},
)
ds_b = xr.Dataset(
data_vars={"var1": ("x", np.array([1, 2], dtype="int64"))},
coords={
"x": ("x", np.array(["a", "c"], dtype="U1"), {"source": 0}),
"label": ("x", np.array([1, 2], dtype="int64")),
},
attrs={"units": "kg"},
)
byteorder = "<" if sys.byteorder == "little" else ">"
expected = dedent(
"""\
Left and right Dataset objects are not identical
Differing dimensions:
(x: 2, y: 3) != (x: 2)
Differing coordinates:
L * x (x) %cU1 'a' 'b'
R * x (x) %cU1 'a' 'c'
source: 0
Coordinates only on the left object:
* y (y) int64 1 2 3
Coordinates only on the right object:
label (x) int64 1 2
Differing data variables:
L var1 (x, y) int64 1 2 3 4 5 6
R var1 (x) int64 1 2
Data variables only on the left object:
var2 (x) int64 3 4
Differing attributes:
L units: m
R units: kg
Attributes only on the left object:
description: desc"""
% (byteorder, byteorder)
)
actual = formatting.diff_dataset_repr(ds_a, ds_b, "identical")
assert actual == expected
def test_array_repr(self):
ds = xr.Dataset(coords={"foo": [1, 2, 3], "bar": [1, 2, 3]})
ds[(1, 2)] = xr.DataArray([0], dims="test")
actual = formatting.array_repr(ds[(1, 2)])
expected = dedent(
"""\
<xarray.DataArray (1, 2) (test: 1)>
array([0])
Dimensions without coordinates: test"""
)
assert actual == expected
with xr.set_options(display_expand_data=False):
actual = formatting.array_repr(ds[(1, 2)])
expected = dedent(
"""\
<xarray.DataArray (1, 2) (test: 1)>
0
Dimensions without coordinates: test"""
)
assert actual == expected
def test_array_repr_variable(self):
var = xr.Variable("x", [0, 1])
formatting.array_repr(var)
with xr.set_options(display_expand_data=False):
formatting.array_repr(var)
def test_inline_variable_array_repr_custom_repr():
class CustomArray:
def __init__(self, value, attr):
self.value = value
self.attr = attr
def _repr_inline_(self, width):
formatted = f"({self.attr}) {self.value}"
if len(formatted) > width:
formatted = f"({self.attr}) ..."
return formatted
def __array_function__(self, *args, **kwargs):
return NotImplemented
@property
def shape(self):
return self.value.shape
@property
def dtype(self):
return self.value.dtype
@property
def ndim(self):
return self.value.ndim
value = CustomArray(np.array([20, 40]), "m")
variable = xr.Variable("x", value)
max_width = 10
actual = formatting.inline_variable_array_repr(variable, max_width=10)
assert actual == value._repr_inline_(max_width)
def test_set_numpy_options():
original_options = np.get_printoptions()
with formatting.set_numpy_options(threshold=10):
assert len(repr(np.arange(500))) < 200
# original options are restored
assert np.get_printoptions() == original_options
def test_short_numpy_repr():
cases = [
np.random.randn(500),
np.random.randn(20, 20),
np.random.randn(5, 10, 15),
np.random.randn(5, 10, 15, 3),
np.random.randn(100, 5, 1),
]
# number of lines:
# for default numpy repr: 167, 140, 254, 248, 599
# for short_numpy_repr: 1, 7, 24, 19, 25
for array in cases:
num_lines = formatting.short_numpy_repr(array).count("\n") + 1
assert num_lines < 30
def test_large_array_repr_length():
da = xr.DataArray(np.random.randn(100, 5, 1))
result = repr(da).splitlines()
assert len(result) < 50
@requires_netCDF4
def test_repr_file_collapsed(tmp_path):
arr = xr.DataArray(np.arange(300), dims="test")
arr.to_netcdf(tmp_path / "test.nc", engine="netcdf4")
with xr.open_dataarray(tmp_path / "test.nc") as arr, xr.set_options(
display_expand_data=False
):
actual = formatting.array_repr(arr)
expected = dedent(
"""\
<xarray.DataArray (test: 300)>
array([ 0, 1, 2, ..., 297, 298, 299])
Dimensions without coordinates: test"""
)
assert actual == expected
@pytest.mark.parametrize(
"display_max_rows, n_vars, n_attr",
[(50, 40, 30), (35, 40, 30), (11, 40, 30), (1, 40, 30)],
)
def test__mapping_repr(display_max_rows, n_vars, n_attr):
long_name = "long_name"
a = np.core.defchararray.add(long_name, np.arange(0, n_vars).astype(str))
b = np.core.defchararray.add("attr_", np.arange(0, n_attr).astype(str))
attrs = {k: 2 for k in b}
coords = dict(time=np.array([0, 1]))
data_vars = dict()
for v in a:
data_vars[v] = xr.DataArray(
name=v,
data=np.array([3, 4]),
dims=["time"],
coords=coords,
)
ds = xr.Dataset(data_vars)
ds.attrs = attrs
with xr.set_options(display_max_rows=display_max_rows):
# Parse the data_vars print and show only data_vars rows:
summary = formatting.data_vars_repr(ds.data_vars).split("\n")
summary = [v for v in summary if long_name in v]
# The length should be less than or equal to display_max_rows:
len_summary = len(summary)
data_vars_print_size = min(display_max_rows, len_summary)
assert len_summary == data_vars_print_size
with xr.set_options(
display_expand_coords=False,
display_expand_data_vars=False,
display_expand_attrs=False,
):
actual = formatting.dataset_repr(ds)
expected = dedent(
f"""\
<xarray.Dataset>
Dimensions: (time: 2)
Coordinates: (1)
Data variables: ({n_vars})
Attributes: ({n_attr})"""
)
assert actual == expected
| apache-2.0 |
moble/spinsfast | example/example_spin.py | 1 | 2304 | #/**************************************************************************
#
# Copyright 2010-2012 Kevin M. Huffenberger & Benjamin D. Wandelt
#
# This file is part of spinsfast.
#
# Spinsfast is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Spinsfast is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with spinsfast. If not, see <http://www.gnu.org/licenses/>.
#
#***************************************************************************/
#
#/* Code revision: 104, 2012-04-13 13:00:16 -0400 (Fri, 13 Apr 2012) */
import sys
sys.path.append('lib')
from numpy import zeros
from numpy.random import normal,seed
# from matplotlib.pyplot import *
import spinsfast
s = 1
lmax = 127
Ntheta = 257
Nphi = 384
# For best accuracy, have lmax < Nphi/2 and lmax < Ntheta/2
#
# For the FFT part of the code:
# theta FFT is fastest for Ntheta = 2^n + 1
# phi FFT is fastest for Nphi = 2^n
#
# But the FFT part is a subdominant to the overall scaling, so lmax is
# much more important to overall speed than number of pixels
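# With the values above, lmax = 127 < Ntheta/2 and lmax < Nphi/2, and
# Ntheta = 257 = 2**8 + 1; Nphi = 384 = 3 * 2**7 is not a power of two but
# still factors into small primes, so the FFTs stay fast.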
Nlm = spinsfast.N_lm(lmax);
alm = zeros(Nlm,dtype=complex)
# Fill the alm with white noise
seed(3124432)
for l in range(abs(s),lmax+1) :
for m in range(-l,l+1) :
i = spinsfast.lm_ind(l,m,lmax)
if (m==0) :
alm[i] = normal()
else :
alm[i] = normal()/2 + 1j*normal()/2
f = spinsfast.salm2map(alm,s,lmax,Ntheta,Nphi)
# In this pixelization, access the map with f[itheta,iphi]
# where 0 <= itheta <= Ntheta-1 and 0<= iphi <= Nphi-1
# and theta = pi*itheta/(Ntheta-1) phi = 2*pi*iphi/Nphi
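# For example, with these settings the pixel f[64, 96] sits at
# theta = pi*64/256 = pi/4 and phi = 2*pi*96/384 = pi/2.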
alm2 = spinsfast.map2salm(f,s,lmax)
diff_max = abs(alm - alm2).max()
print("max|alm2 - alm| = " + str(diff_max))
# figure()
# imshow(f.real,interpolation='nearest')
# colorbar()
# title("Real part of f")
# xlabel("iphi")
# ylabel("itheta")
# show()
| gpl-3.0 |
rahuldhote/scikit-learn | sklearn/tests/test_naive_bayes.py | 142 | 17496 | import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.cross_validation import cross_val_score, train_test_split
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_discrete_prior():
# Test whether class priors are properly set.
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
# Test picklability of discrete naive Bayes classifiers
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
# Test input checks for the fit method
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1]), 2)
assert_equal(clf.predict_proba(X[0]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba(X[1])), 1)
assert_almost_equal(np.sum(clf.predict_proba(X[-1])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
# Test whether discrete NB classes use provided prior
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([1, 0]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
# coef_ and intercept_ should have shapes as in other linear models.
# Non-regression test for issue #2127.
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
# Non regression test to make sure that any further refactoring / optim
# of the NB models do not harm the performance on a slightly non-linearly
# separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
# Check manual estimate matches
assert_array_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
    # Fit BernoulliNB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([0, 1, 1, 0, 0, 1])
# Check the predictive probabilities are correct
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
| bsd-3-clause |
JosmanPS/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 226 | 1384 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| bsd-3-clause |
DucQuang1/py-earth | pyearth/earth.py | 1 | 31698 | from ._forward import ForwardPasser
from ._pruning import PruningPasser
from ._util import ascii_table, apply_weights_2d, apply_weights_1d, gcv
from sklearn.base import RegressorMixin, BaseEstimator, TransformerMixin
from sklearn.utils.validation import (assert_all_finite, check_is_fitted,
check_X_y)
import numpy as np
from scipy import sparse
class Earth(BaseEstimator, RegressorMixin, TransformerMixin):
"""
Multivariate Adaptive Regression Splines
A flexible regression method that automatically searches for interactions
and non-linear relationships. Earth models can be thought of as
linear models in a higher dimensional basis space
(specifically, a multivariate truncated power spline basis).
Each term in an Earth model is a product of so called "hinge functions".
A hinge function is a function that's equal to its argument where that
argument is greater than zero and is zero everywhere else.
The multivariate adaptive regression splines algorithm has two stages.
First, the forward pass searches for terms in the truncated power spline
basis that locally minimize the squared error loss of the training set.
Next, a pruning pass selects a subset of those terms that produces
a locally minimal generalized cross-validation (GCV) score. The GCV score
is not actually based on cross-validation, but rather is meant to
approximate a true cross-validation score by penalizing model complexity.
The final result is a set of terms that is nonlinear in the original
feature space, may include interactions, and is likely to generalize well.
The Earth class supports dense input only. Data structures from the
pandas and patsy modules are supported, but are copied into numpy arrays
for computation. No copy is made if the inputs are numpy float64 arrays.
Earth objects can be serialized using the pickle module and copied
using the copy module.
Parameters
----------
max_terms : int, optional (default=2*n + 10, where n is the
number of features)
The maximum number of terms generated by the forward pass.
max_degree : int, optional (default=1)
The maximum degree of terms generated by the forward pass.
penalty : float, optional (default=3.0)
A smoothing parameter used to calculate GCV and GRSQ.
Used during the pruning pass and to determine whether to add a hinge
or linear basis function during the forward pass.
See the d parameter in equation 32, Friedman, 1991.
endspan_alpha : float, optional, probability between 0 and 1 (default=0.05)
A parameter controlling the calculation of the endspan
parameter (below). The endspan parameter is calculated as
round(3 - log2(endspan_alpha/n)), where n is the number of features.
The endspan_alpha parameter represents the probability of a run of
positive or negative error values on either end of the data vector
of any feature in the data set. See equation 45, Friedman, 1991.
endspan : int, optional (default=-1)
The number of extreme data values of each feature not eligible
as knot locations. If endspan is set to -1 (default) then the
        endspan parameter is calculated based on endspan_alpha (above).
If endspan is set to a positive integer then endspan_alpha is ignored.
minspan_alpha : float, optional, probability between 0 and 1 (default=0.05)
A parameter controlling the calculation of the minspan
parameter (below). The minspan parameter is calculated as
(int) -log2(-(1.0/(n*count))*log(1.0-minspan_alpha)) / 2.5
where n is the number of features and count is the number of points at
which the parent term is non-zero. The minspan_alpha parameter
represents the probability of a run of positive or negative error values
between adjacent knots separated by minspan intervening data points.
See equation 43, Friedman, 1991.
minspan : int, optional (default=-1)
The minimal number of data points between knots. If minspan is set
to -1 (default) then the minspan parameter is calculated based on
minspan_alpha (above). If minspan is set to a positive integer then
minspan_alpha is ignored.
    thresh : float, optional (default=0.001)
Parameter used when evaluating stopping conditions for the forward
pass. If either RSQ > 1 - thresh or if RSQ increases by less than
thresh for a forward pass iteration then the forward pass is terminated.
min_search_points : int, optional (default=100)
Used to calculate check_every (below). The minimum samples necessary
for check_every to be greater than 1. The check_every parameter
is calculated as
(int) m / min_search_points
if m > min_search_points, where m is the number of samples in the
training set. If m <= min_search_points then check_every is set to 1.
check_every : int, optional (default=-1)
If check_every > 0, only one of every check_every sorted data points
is considered as a candidate knot. If check_every is set to -1 then
the check_every parameter is calculated based on
min_search_points (above).
allow_linear : bool, optional (default=True)
If True, the forward pass will check the GCV of each new pair of terms
and, if it's not an improvement on a single term with no knot
(called a linear term, although it may actually be a product of a linear
term with some other parent term), then only that single, knotless term
will be used. If False, that behavior is disabled and all terms
will have knots except those with variables specified by the linvars
argument (see the fit method).
smooth : bool, optional (default=False)
If True, the model will be smoothed such that it has continuous first
derivatives.
For details, see section 3.7, Friedman, 1991.
    enable_pruning : bool, optional (default=True)
If False, the pruning pass will be skipped.
Attributes
----------
`coef_` : array, shape = [pruned basis length]
The weights of the model terms that have not been pruned.
`basis_` : _basis.Basis
An object representing model terms. Each term is a product of
constant, linear, and hinge functions of the input features.
`mse_` : float
The mean squared error of the model after the final linear fit.
If sample_weight is given, this score is weighted appropriately.
`rsq_` : float
The generalized r^2 of the model after the final linear fit.
If sample_weight is given, this score is weighted appropriately.
`gcv_` : float
The generalized cross validation (GCV) score of the model after the
final linear fit. If sample_weight is given, this score is
weighted appropriately.
`grsq_` : float
An r^2 like score based on the GCV. If sample_weight is given, this
score is weighted appropriately.
`forward_pass_record_` : _record.ForwardPassRecord
An object containing information about the forward pass, such as
training loss function values after each iteration and the final
stopping condition.
`pruning_pass_record_` : _record.PruningPassRecord
An object containing information about the pruning pass, such as
training loss function values after each iteration and the
selected optimal iteration.
`xlabels_` : list
List of column names for training predictors.
Defaults to ['x0','x1',....] if column names are not provided.
References
----------
.. [1] Friedman, Jerome. Multivariate Adaptive Regression Splines.
Annals of Statistics. Volume 19, Number 1 (1991), 1-67.
"""
forward_pass_arg_names = set([
'max_terms', 'max_degree', 'penalty',
'endspan_alpha', 'endspan',
'minspan_alpha', 'minspan',
'thresh', 'min_search_points', 'check_every',
'allow_linear'
])
pruning_pass_arg_names = set([
'penalty'
])
def __init__(self, max_terms=None, max_degree=None, penalty=None,
endspan_alpha=None, endspan=None,
minspan_alpha=None, minspan=None,
thresh=None, min_search_points=None, check_every=None,
allow_linear=None, smooth=None, enable_pruning=True):
kwargs = {}
call = locals()
for name in self._get_param_names():
if call[name] is not None:
kwargs[name] = call[name]
self.set_params(**kwargs)
def __eq__(self, other):
if self.__class__ is not other.__class__:
return False
keys = set(self.__dict__.keys()) | set(other.__dict__.keys())
for k in keys:
try:
v_self = self.__dict__[k]
v_other = other.__dict__[k]
except KeyError:
return False
try:
if v_self != v_other:
return False
except ValueError: # Case of numpy arrays
if np.any(v_self != v_other):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def _pull_forward_args(self, **kwargs):
'''
Pull named arguments relevant to the forward pass.
'''
result = {}
for name in self.forward_pass_arg_names:
if name in kwargs:
result[name] = kwargs[name]
return result
def _pull_pruning_args(self, **kwargs):
'''
Pull named arguments relevant to the pruning pass.
'''
result = {}
for name in self.pruning_pass_arg_names:
if name in kwargs:
result[name] = kwargs[name]
return result
def _scrape_labels(self, X):
'''
Try to get labels from input data (for example, if X is a
pandas DataFrame). Return None if no labels can be extracted.
'''
try:
labels = list(X.columns)
except AttributeError:
try:
labels = list(X.design_info.column_names)
except AttributeError:
try:
labels = list(X.dtype.names)
except TypeError:
try:
labels = ['x%d' % i for i in range(X.shape[1])]
except IndexError:
labels = ['x%d' % i for i in range(1)]
return labels
def _scrub_x(self, X, **kwargs):
'''
Sanitize input predictors and extract column names if appropriate.
'''
# Check for sparseness
if sparse.issparse(X):
raise TypeError('A sparse matrix was passed, but dense data '
'is required. Use X.toarray() to convert to dense.')
# Convert to internally used data type
X = np.asarray(X, dtype=np.float64, order='F')
assert_all_finite(X)
if X.ndim == 1:
X = X[:, np.newaxis]
# Ensure correct number of columns
if hasattr(self, 'basis_') and self.basis_ is not None:
if X.shape[1] != self.basis_.num_variables:
raise ValueError('Wrong number of columns in X')
return X
def _scrub(self, X, y, sample_weight, **kwargs):
'''
Sanitize input data.
'''
# Check for sparseness
if sparse.issparse(y):
raise TypeError('A sparse matrix was passed, but dense data '
'is required. Use y.toarray() to convert to dense.')
if sparse.issparse(sample_weight):
raise TypeError('A sparse matrix was passed, but dense data '
                            'is required. Use sample_weight.toarray() '
'to convert to dense.')
# Check whether X is the output of patsy.dmatrices
if y is None and isinstance(X, tuple):
y, X = X
# Handle X separately
X = self._scrub_x(X, **kwargs)
# Convert y to internally used data type
y = np.asarray(y, dtype=np.float64)
assert_all_finite(y)
y = y.reshape(y.shape[0])
# Deal with sample_weight
if sample_weight is None:
sample_weight = np.ones(y.shape[0], dtype=y.dtype)
else:
sample_weight = np.asarray(sample_weight)
assert_all_finite(sample_weight)
sample_weight = sample_weight.reshape(sample_weight.shape[0])
# Make sure dimensions match
if y.shape[0] != X.shape[0]:
raise ValueError('X and y do not have compatible dimensions.')
if y.shape != sample_weight.shape:
raise ValueError(
'y and sample_weight do not have compatible dimensions.')
# Make sure everything is finite
assert_all_finite(X)
assert_all_finite(y)
assert_all_finite(sample_weight)
return X, y, sample_weight
def fit(self, X, y=None, sample_weight=None, xlabels=None, linvars=[]):
'''
Fit an Earth model to the input data X and y.
Parameters
----------
X : array-like, shape = [m, n] where m is the number of samples
and n is the number of features The training predictors.
The X parameter can be a numpy array, a pandas DataFrame, a patsy
DesignMatrix, or a tuple of patsy DesignMatrix objects as
output by patsy.dmatrices.
y : array-like, optional (default=None), shape = [m] where m is the
number of samples The training response. The y parameter can be
a numpy array, a pandas DataFrame with one column, a Patsy
DesignMatrix, or can be left as None (default) if X was the output
of a call to patsy.dmatrices (in which case, X contains the
response).
sample_weight : array-like, optional (default=None), shape = [m] where
m is the number of samples
Sample weights for training. Weights must be greater than or equal
to zero. Rows with greater weights contribute more strongly to the
fitted model. Rows with zero weight do not contribute at all.
Weights are useful when dealing with heteroscedasticity. In such
cases, the weight should be proportional to the inverse of the
(known) variance.
linvars : iterable of strings or ints, optional (empty by default)
Used to specify features that may only enter terms as linear basis
functions (without knots). Can include both column numbers and
column names (see xlabels, below). If left empty, some variables
may still enter linearly during the forward pass if no knot would
provide a reduction in GCV compared to the linear function.
Note that this feature differs from the R package earth.
xlabels : iterable of strings, optional (empty by default)
The xlabels argument can be used to assign names to data columns.
This argument is not generally needed, as names can be captured
automatically from most standard data structures.
If included, must have length n, where n is the number of features.
Note that column order is used to compute term values and make
predictions, not column names.
'''
check_X_y(X, y, accept_sparse=None, multi_output=True)
# Format and label the data
if xlabels is None:
self.xlabels_ = self._scrape_labels(X)
else:
if len(xlabels) != X.shape[1]:
raise ValueError('The length of xlabels is not the '
'same as the number of columns of X')
self.xlabels_ = xlabels
self.linvars_ = linvars
X, y, sample_weight = self._scrub(X, y, sample_weight)
# Do the actual work
self.__forward_pass(X, y, sample_weight, self.xlabels_, linvars)
if self.enable_pruning is True:
self.__pruning_pass(X, y, sample_weight)
if hasattr(self, 'smooth') and self.smooth:
self.basis_ = self.basis_.smooth(X)
self.__linear_fit(X, y, sample_weight)
return self
def __forward_pass(
self, X, y=None, sample_weight=None, xlabels=None, linvars=[]):
'''
Perform the forward pass of the multivariate adaptive regression
splines algorithm. Users will normally want to call the fit method
instead, which performs the forward pass, the pruning pass,
and a linear fit to determine the final model coefficients.
Parameters
----------
X : array-like, shape = [m, n] where m is the number of samples and n is
the number of features The training predictors. The X parameter can
be a numpy array, a pandas DataFrame, a patsy DesignMatrix, or a
tuple of patsy DesignMatrix objects as output by patsy.dmatrices.
y : array-like, optional (default=None), shape = [m] where m is the
number of samples The training response. The y parameter can be
a numpy array, a pandas DataFrame with one column, a Patsy
DesignMatrix, or can be left as None (default) if X was the output
of a call to patsy.dmatrices (in which case, X contains the
response).
sample_weight : array-like, optional (default=None), shape = [m] where m
is the number of samples
Sample weights for training. Weights must be greater than or
equal to zero. Rows with greater weights contribute more strongly
to the fitted model. Rows with zero weight do not contribute at
all. Weights are useful when dealing with heteroscedasticity.
In such cases, the weight should be proportional to the inverse
of the (known) variance.
linvars : iterable of strings or ints, optional (empty by default)
Used to specify features that may only enter terms as linear basis
            functions (without knots). Can include both column numbers and
column names (see xlabels, below).
xlabels : iterable of strings, optional (empty by default)
The xlabels argument can be used to assign names to data columns.
This argument is not generally needed, as names can be captured
automatically from most standard data structures. If included, must
have length n, where n is the number of features. Note that column
order is used to compute term values and make predictions, not
column names.
'''
# Label and format data
if xlabels is None:
self.xlabels_ = self._scrape_labels(X)
else:
self.xlabels_ = xlabels
X, y, sample_weight = self._scrub(X, y, sample_weight)
# Do the actual work
args = self._pull_forward_args(**self.__dict__)
forward_passer = ForwardPasser(
X, y, sample_weight, xlabels=self.xlabels_, linvars=linvars, **args)
forward_passer.run()
self.forward_pass_record_ = forward_passer.trace()
self.basis_ = forward_passer.get_basis()
def __pruning_pass(self, X, y=None, sample_weight=None):
'''
Perform the pruning pass of the multivariate adaptive regression
splines algorithm. Users will normally want to call the fit
method instead, which performs the forward pass, the pruning
pass, and a linear fit to determine the final model coefficients.
Parameters
----------
X : array-like, shape = [m, n] where m is the number of samples
and n is the number of features The training predictors.
The X parameter can be a numpy array, a pandas DataFrame, a patsy
DesignMatrix, or a tuple of patsy DesignMatrix objects as output
by patsy.dmatrices.
y : array-like, optional (default=None), shape = [m] where m is the
number of samples The training response. The y parameter can be
a numpy array, a pandas DataFrame with one column, a Patsy
DesignMatrix, or can be left as None (default) if X was the output
of a call to patsy.dmatrices (in which case, X contains
the response).
sample_weight : array-like, optional (default=None), shape = [m]
where m is the number of samples
Sample weights for training. Weights must be greater than or
equal to zero. Rows with greater weights contribute more strongly
to the fitted model. Rows with zero weight do not contribute at
all. Weights are useful when dealing with heteroscedasticity.
In such cases, the weight should be proportional to the inverse
of the (known) variance.
'''
# Format data
X, y, sample_weight = self._scrub(X, y, sample_weight)
# Pull arguments from self
args = self._pull_pruning_args(**self.__dict__)
# Do the actual work
pruning_passer = PruningPasser(
self.basis_, X, y, sample_weight, **args)
pruning_passer.run()
self.pruning_pass_record_ = pruning_passer.trace()
def forward_trace(self):
'''Return information about the forward pass.'''
try:
return self.forward_pass_record_
except AttributeError:
return None
def pruning_trace(self):
'''Return information about the pruning pass.'''
try:
return self.pruning_pass_record_
except AttributeError:
return None
def trace(self):
'''Return information about the forward and pruning passes.'''
return EarthTrace(self.forward_trace(), self.pruning_trace())
def summary(self):
'''Return a string describing the model.'''
result = ''
if self.forward_trace() is None:
result += 'Untrained Earth Model'
return result
elif self.pruning_trace() is None:
result += 'Unpruned Earth Model\n'
else:
result += 'Earth Model\n'
header = ['Basis Function', 'Pruned', 'Coefficient']
data = []
i = 0
for bf in self.basis_:
data.append([str(bf), 'Yes' if bf.is_pruned() else 'No', '%g' %
self.coef_[i] if not bf.is_pruned() else 'None'])
if not bf.is_pruned():
i += 1
result += ascii_table(header, data)
if self.pruning_trace() is not None:
record = self.pruning_trace()
selection = record.get_selected()
else:
record = self.forward_trace()
selection = len(record) - 1
result += '\n'
result += 'MSE: %.4f, GCV: %.4f, RSQ: %.4f, GRSQ: %.4f' % (
self.mse_, self.gcv_, self.rsq_, self.grsq_)
return result
def __linear_fit(self, X, y=None, sample_weight=None):
'''
Solve the linear least squares problem to determine the coefficients
of the unpruned basis functions.
Parameters
----------
X : array-like, shape = [m, n] where m is the number of samples and n
is the number of features The training predictors. The X parameter
can be a numpy array, a pandas DataFrame, a patsy
DesignMatrix, or a tuple of patsy DesignMatrix objects as output
by patsy.dmatrices.
y : array-like, optional (default=None), shape = [m] where m is the
number of samples The training response. The y parameter can be
a numpy array, a pandas DataFrame with one column, a Patsy
DesignMatrix, or can be left as None (default) if X was the output
of a call to patsy.dmatrices (in which case, X contains the
response).
sample_weight : array-like, optional (default=None), shape = [m] where
m is the number of samples
Sample weights for training. Weights must be greater than or equal
to zero. Rows with greater weights contribute more strongly to
the fitted model. Rows with zero weight do not contribute at all.
Weights are useful when dealing with heteroscedasticity. In such
cases, the weight should be proportional to the inverse of the
(known) variance.
'''
# Format data
X, y, sample_weight = self._scrub(X, y, sample_weight)
# Transform into basis space
B = self.transform(X)
# Apply weights to B
apply_weights_2d(B, sample_weight)
# Apply weights to y
weighted_y = y.copy()
apply_weights_1d(weighted_y, sample_weight)
# Solve the linear least squares problem
self.coef_, resid = np.linalg.lstsq(B, weighted_y)[0:2]
# Compute the final mse, gcv, rsq, and grsq (may be different from the
# pruning scores if the model has been smoothed)
self.mse_ = np.sum(resid) / float(X.shape[0])
self.gcv_ = gcv(
self.mse_, self.coef_.shape[0], X.shape[0], self.get_penalty())
y_avg = np.average(y, weights=sample_weight)
y_sqr = sample_weight * (y - y_avg) ** 2
mse0 = np.sum(y_sqr) / float(X.shape[0])
gcv0 = gcv(mse0, 1, X.shape[0], self.get_penalty())
self.rsq_ = 1.0 - (self.mse_ / mse0)
self.grsq_ = 1.0 - (self.gcv_ / gcv0)
def predict(self, X):
'''
Predict the response based on the input data X.
Parameters
----------
X : array-like, shape = [m, n] where m is the number of samples and n
is the number of features
The training predictors. The X parameter can be a numpy
array, a pandas DataFrame, or a patsy DesignMatrix.
'''
X = self._scrub_x(X)
B = self.transform(X)
return np.dot(B, self.coef_)
def predict_deriv(self, X, variables=None):
'''
Predict the first derivatives of the response based on the input data X.
Parameters
----------
X : array-like, shape = [m, n] where m is the number of samples and n is
the number of features The training predictors. The X parameter can
be a numpy array, a pandas DataFrame, or a patsy DesignMatrix.
variables : list
The variables over which derivatives will be computed. Each column
in the resulting array corresponds to a variable. If not
specified, all variables are used (even if some are not relevant
to the final model and have derivatives that are identically zero).
'''
check_is_fitted(self, "basis_")
if type(variables) in (str, int):
variables = [variables]
if variables is None:
variables_of_interest = list(range(len(self.xlabels_)))
else:
variables_of_interest = []
for var in variables:
if isinstance(var, int):
variables_of_interest.append(var)
else:
variables_of_interest.append(self.xlabels_.index(var))
X = self._scrub_x(X)
J = np.zeros(shape=(X.shape[0], len(variables_of_interest)))
b = np.empty(shape=X.shape[0])
j = np.empty(shape=X.shape[0])
self.basis_.transform_deriv(
X, b, j, self.coef_, J, variables_of_interest, True)
return J
def score(self, X, y=None, sample_weight=None):
'''
Calculate the generalized r^2 of the model on data X and y.
Parameters
----------
X : array-like, shape = [m, n] where m is the number of samples
and n is the number of features The training predictors.
The X parameter can be a numpy array, a pandas DataFrame, a patsy
DesignMatrix, or a tuple of patsy DesignMatrix objects as output
by patsy.dmatrices.
y : array-like, optional (default=None), shape = [m] where m is the
number of samples The training response. The y parameter can be
a numpy array, a pandas DataFrame with one column, a Patsy
DesignMatrix, or can be left as None (default) if X was the
output of a call to patsy.dmatrices (in which case, X contains
the response).
sample_weight : array-like, optional (default=None), shape = [m] where
m is the number of samples
Sample weights for training. Weights must be greater than or
equal to zero. Rows with greater weights contribute more strongly
to the fitted model. Rows with zero weight do not contribute at
all. Weights are useful when dealing with heteroscedasticity.
In such cases, the weight should be proportional to the inverse of
the (known) variance.
'''
check_is_fitted(self, "basis_")
X, y, sample_weight = self._scrub(X, y, sample_weight)
y_hat = self.predict(X)
m, _ = X.shape
residual = y - y_hat
mse = np.sum(sample_weight * (residual ** 2)) / m
y_avg = np.average(y, weights=sample_weight)
y_sqr = sample_weight * (y - y_avg) ** 2
mse0 = np.sum(y_sqr) / m
return 1 - (mse / mse0)
def transform(self, X):
'''
Transform X into the basis space. Normally, users will call the
predict method instead, which both transforms into basis space
calculates the weighted sum of basis terms to produce a prediction
of the response. Users may wish to call transform directly in some
cases. For example, users may wish to apply other statistical or
machine learning algorithms, such as generalized linear regression,
in basis space.
Parameters
----------
X : array-like, shape = [m, n] where m is the number of samples and n
is the number of features
The training predictors. The X parameter can be a numpy array, a
pandas DataFrame, or a patsy DesignMatrix.
'''
check_is_fitted(self, "basis_")
X = self._scrub_x(X)
B = np.empty(shape=(X.shape[0], self.basis_.plen()), order='F')
self.basis_.transform(X, B)
return B
def get_penalty(self):
'''Get the penalty parameter being used. Default is 3.'''
if 'penalty' in self.__dict__ and self.penalty is not None:
return self.penalty
else:
return 3.0
class EarthTrace(object):
def __init__(self, forward_trace, pruning_trace):
self.forward_trace = forward_trace
self.pruning_trace = pruning_trace
def __eq__(self, other):
return (self.__class__ is other.__class__ and
self.forward_trace == other.forward_trace and
self.pruning_trace == other.pruning_trace)
def __str__(self):
return str(self.forward_trace) + '\n' + str(self.pruning_trace)
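if __name__ == '__main__':
    # Editorial usage sketch, not part of the original py-earth module: it only
    # illustrates the fit / summary / predict / transform workflow documented in
    # the docstrings above. The synthetic data and hyper-parameters below are
    # assumptions chosen for demonstration.
    np.random.seed(0)
    m, n = 500, 5
    X_demo = np.random.uniform(size=(m, n))
    y_demo = (10 * np.sin(np.pi * X_demo[:, 0] * X_demo[:, 1]) +
              20 * (X_demo[:, 2] - 0.5) ** 2 +
              np.random.normal(size=m))
    model = Earth(max_degree=2, penalty=3.0)
    model.fit(X_demo, y_demo)
    print(model.summary())
    # Predictions, and the basis-space design matrix that the transform
    # docstring suggests feeding into other regression methods.
    y_hat = model.predict(X_demo)
    B_demo = model.transform(X_demo)
    print('training RSQ: %g, basis matrix shape: %s'
          % (model.score(X_demo, y_demo), str(B_demo.shape)))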
| bsd-3-clause |
rcrowder/nupic | src/nupic/frameworks/viz/networkx_renderer.py | 14 | 1425 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import networkx as nx
import matplotlib.pyplot as plt
class NetworkXRenderer(object):
"""
  Network visualization "renderer" implementation to render a network with
  networkx and matplotlib.
"""
def __init__(self, layoutFn=nx.spring_layout):
self.layoutFn = layoutFn
def render(self, graph):
pos = self.layoutFn(graph)
nx.draw_networkx(graph, pos)
nx.draw_networkx_edge_labels(graph, pos, clip_on=False, rotate=False)
plt.show()
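if __name__ == "__main__":
  # Editorial usage sketch, not part of the original NuPIC source: render a
  # tiny hypothetical three-region pipeline with the class above. The node and
  # edge names are illustrative assumptions, not real NuPIC region types.
  g = nx.DiGraph()
  g.add_edge("sensor", "spatialPooler", label="bottomUpOut -> bottomUpIn")
  g.add_edge("spatialPooler", "temporalMemory", label="bottomUpOut -> bottomUpIn")
  NetworkXRenderer().render(g)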
| agpl-3.0 |
grochmal/capybara | src/proto/chopimg.py | 1 | 1388 | #!/usr/bin/env python
import os,sys
from PIL import Image
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from matplotlib import cm
__doc__ = 'Usage: chopimg.py file n output'
def mark(ar, p):
yn, xn = ar.shape
xticks = [l*(xn/p) for l in range(1,p)]
yticks = [l*(yn/p) for l in range(1,p)]
for tic in xticks: ar[:,[tic-1,tic,tic+1]] = 255
for tic in yticks: ar[[tic-1,tic,tic+1],:] = 255
return ar
def chop(ar, p):
yn, xn = ar.shape
bits = [0 for i in range(p*p)]
for i in range(p):
for j in range(p):
bits[i*p+j] = ar[yn*i/p:yn*(i+1)/p,xn*j/p:xn*(j+1)/p]
return bits
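# Editorial worked example (comment only, not in the original script): chopping
# a 4x4 array with p=2 yields four 2x2 blocks in row-major order, e.g.
#   a = np.arange(16).reshape(4, 4)
#   chop(a, 2) -> [[[0, 1], [4, 5]], [[2, 3], [6, 7]],
#                  [[8, 9], [12, 13]], [[10, 11], [14, 15]]]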
def plot(img, ps, p):
plt.subplot(p+1,p,p/2+1)
plt.imshow(img, cmap=cm.gray)
plt.xticks([])
plt.yticks([])
for i in range(p*p):
plt.subplot(p+1,p,p+1+i)
plt.imshow(ps[i], cmap=cm.gray)
plt.xticks([])
plt.yticks([])
def chop_img(img, n, out):
im = np.array(Image.open(img))
ps = chop(im, n)
mk = mark(im.copy(), n)
plot(mk, ps, n)
#plt.show()
plt.savefig(out, dpi=300)
if '__main__' == __name__:
if 4 != len(sys.argv):
print __doc__
exit(0)
if not os.path.isfile(sys.argv[1]):
print sys.argv[1], ': no such file'
exit(1)
n = int(sys.argv[2])
chop_img(sys.argv[1], n, sys.argv[3])
| gpl-3.0 |
ThomasBrouwer/BNMTF | experiments/experiments_toy/time/nmf_icm_time.py | 1 | 2443 | """
Recover the toy dataset generated by example/generate_toy/bnmf/generate_bnmf.py
using ICM, and plot the MSE against timestamps.
We can plot the MSE, R2 and Rp as it converges, on the entire dataset.
We have I=100, J=80, K=10, and no test data.
We give flatter priors (1/10) than what was used to generate the data (1).
"""
import sys, os
project_location = os.path.dirname(__file__)+"/../../../../"
sys.path.append(project_location)
from BNMTF.code.models.nmf_icm import nmf_icm
import numpy, random, scipy, matplotlib.pyplot as plt
##########
input_folder = project_location+"BNMTF/data_toy/bnmf/"
repeats = 10
iterations = 2000
init_UV = 'random'
I, J, K = 100,80,10
minimum_TN = 0.1
alpha, beta = 1., 1. #1., 1.
lambdaU = numpy.ones((I,K))/10.
lambdaV = numpy.ones((J,K))/10.
priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
# Load in data
R = numpy.loadtxt(input_folder+"R.txt")
M = numpy.ones((I,J))
# Run the VB algorithm, <repeats> times
times_repeats = []
performances_repeats = []
for i in range(0,repeats):
# Set all the seeds
numpy.random.seed(0)
random.seed(0)
scipy.random.seed(0)
# Run the classifier
nmf = nmf_icm(R,M,K,priors)
nmf.initialise(init_UV)
nmf.run(iterations,minimum_TN=minimum_TN)
# Extract the performances and timestamps across all iterations
times_repeats.append(nmf.all_times)
performances_repeats.append(nmf.all_performances)
# Check whether seed worked: all performances should be the same
assert all([numpy.array_equal(performances, performances_repeats[0]) for performances in performances_repeats]), \
"Seed went wrong - performances not the same across repeats!"
# Print out the performances, and the average times
icm_all_times_average = list(numpy.average(times_repeats, axis=0))
icm_all_performances = performances_repeats[0]
print "icm_all_times_average = %s" % icm_all_times_average
print "icm_all_performances = %s" % icm_all_performances
# Print all time plots, the average, and performance vs iterations
plt.figure()
plt.title("Performance against time")
plt.ylim(0,10)
for times in times_repeats:
plt.plot(times, icm_all_performances['MSE'])
plt.figure()
plt.title("Performance against average time")
plt.plot(icm_all_times_average, icm_all_performances['MSE'])
plt.ylim(0,10)
plt.figure()
plt.title("Performance against iteration")
plt.plot(icm_all_performances['MSE'])
plt.ylim(0,10) | apache-2.0 |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/tests/indexes/test_multi.py | 1 | 88291 | # -*- coding: utf-8 -*-
from datetime import timedelta
from itertools import product
import nose
import re
import warnings
from pandas import (date_range, MultiIndex, Index, CategoricalIndex,
compat)
from pandas.core.common import PerformanceWarning
from pandas.indexes.base import InvalidIndexError
from pandas.compat import range, lrange, u, PY3, long, lzip
import numpy as np
from pandas.util.testing import (assert_almost_equal, assertRaises,
assertRaisesRegexp, assert_copy)
import pandas.util.testing as tm
import pandas as pd
from pandas.lib import Timestamp
from .common import Base
class TestMultiIndex(Base, tm.TestCase):
_holder = MultiIndex
_multiprocess_can_split_ = True
_compat_props = ['shape', 'ndim', 'size', 'itemsize']
def setUp(self):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assertRaisesRegexp(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
self.assertTrue(i.labels[0].dtype == 'int8')
self.assertTrue(i.labels[1].dtype == 'int8')
i = MultiIndex.from_product([['a'], range(40)])
self.assertTrue(i.labels[1].dtype == 'int8')
i = MultiIndex.from_product([['a'], range(400)])
self.assertTrue(i.labels[1].dtype == 'int16')
i = MultiIndex.from_product([['a'], range(40000)])
self.assertTrue(i.labels[1].dtype == 'int32')
i = pd.MultiIndex.from_product([['a'], range(1000)])
self.assertTrue((i.labels[0] >= 0).all())
self.assertTrue((i.labels[1] >= 0).all())
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
self.assertEqual(self.index.rename, self.index.set_names)
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, new_names)
with assertRaisesRegexp(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, new_names2)
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, [new_names[0], self.index_names[1]])
res = ind.set_names(new_names2[0], level=0, inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, [new_names2[0], self.index_names[1]])
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, new_names)
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, new_names2)
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
self.assertEqual(len(actual), len(expected))
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
assert_almost_equal(act, exp)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
self.assertEqual(len(actual), len(expected))
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
assert_almost_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assertRaisesRegexp(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assertRaisesRegexp(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assertRaisesRegexp(ValueError, 'Length of names'):
self.index.set_names([names[0]])
        # shouldn't error on scalar data; should demand list-like input instead
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
        # shouldn't error on scalar data; should demand list-like input instead
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
        # shouldn't error on scalar data; should demand list-like input instead
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assertRaisesRegexp(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with assertRaisesRegexp(TypeError, mutable_regex):
levels[0] = levels[0]
with assertRaisesRegexp(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with assertRaisesRegexp(TypeError, mutable_regex):
labels[0] = labels[0]
with assertRaisesRegexp(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with assertRaisesRegexp(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
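# the materialised tuples backing .values are cached in _tuples; inplace
# set_levels/set_labels must drop that cache so .values reflects the new metadata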
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
self.assertIsNotNone(mi1._tuples)
# make sure level setting works
new_vals = mi1.set_levels(levels2).values
assert_almost_equal(vals2, new_vals)
# non-inplace doesn't kill _tuples [implementation detail]
assert_almost_equal(mi1._tuples, vals)
# and values is still same too
assert_almost_equal(mi1.values, vals)
# inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
assert_almost_equal(mi1.values, vals2)
# make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6, ), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# must be 1d array of tuples
self.assertEqual(exp_values.shape, (6, ))
new_values = mi2.set_labels(labels2).values
# not inplace shouldn't change
assert_almost_equal(mi2._tuples, vals2)
# should have correct values
assert_almost_equal(exp_values, new_values)
# and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
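# with copy=True the constructor must take its own copies of the levels and
# labels arrays, so mutating the inputs afterwards does not leak into the index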
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
self.assertEqual(mi.labels[0][0], val)
labels[0] = 15
self.assertEqual(mi.labels[0][0], val)
val = levels[0]
levels[0] = "PANDA"
self.assertEqual(mi.levels[0][0], val)
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sortlevel()
self.assertIsNone(df.is_copy)
self.assertEqual(df.index.names, ('Name', 'Number'))
df = df.set_value(('grethe', '4'), 'one', 99.34)
self.assertIsNone(df.is_copy)
self.assertEqual(df.index.names, ('Name', 'Number'))
def test_names(self):
# names are assigned in __init__
names = self.index_names
level_names = [level.name for level in self.index.levels]
self.assertEqual(names, level_names)
# setting bad names on existing
index = self.index
assertRaisesRegexp(ValueError, "^Length of names", setattr, index,
"names", list(index.names) + ["third"])
assertRaisesRegexp(ValueError, "^Length of names", setattr, index,
"names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
assertRaisesRegexp(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
assertRaisesRegexp(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
self.assertEqual(ind_names, level_names)
def test_reference_duplicate_name(self):
idx = MultiIndex.from_tuples(
[('a', 'b'), ('c', 'd')], names=['x', 'x'])
self.assertTrue(idx._reference_duplicate_name('x'))
idx = MultiIndex.from_tuples(
[('a', 'b'), ('c', 'd')], names=['x', 'y'])
self.assertFalse(idx._reference_duplicate_name('x'))
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with assertRaisesRegexp(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
def test_constructor_single_level(self):
single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
tm.assertIsInstance(single_level, Index)
self.assertNotIsInstance(single_level, MultiIndex)
self.assertEqual(single_level.name, 'first')
single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]])
self.assertIsNone(single_level.name)
def test_constructor_no_levels(self):
assertRaisesRegexp(ValueError, "non-zero number of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assertRaisesRegexp(TypeError, both_re):
MultiIndex(levels=[])
with tm.assertRaisesRegexp(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
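# mismatched lengths should raise through the constructor, through the public
# set_levels/set_labels API, and through the deprecated .levels/.labels setters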
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
assertRaisesRegexp(ValueError, "Length of levels and labels must be"
" the same", MultiIndex, levels=levels,
labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assertRaisesRegexp(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assertRaisesRegexp(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assertRaisesRegexp(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assertRaisesRegexp(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
# deprecated properties
with warnings.catch_warnings():
warnings.simplefilter('ignore')
with tm.assertRaisesRegexp(ValueError, length_error):
self.index.copy().levels = [['a'], ['b']]
with tm.assertRaisesRegexp(ValueError, label_error):
self.index.copy().labels = [[0, 0, 0, 0], [0, 0]]
def assert_multiindex_copied(self, copy, original):
# levels should be (at least) shallow-copied
assert_copy(copy.levels, original.levels)
assert_almost_equal(copy.labels, original.labels)
# labels should compare equal regardless of how the copy was made
assert_almost_equal(copy.labels, original.labels)
self.assertIsNot(copy.labels, original.labels)
# names should compare equal regardless of how the copy was made
self.assertEqual(copy.names, original.names)
self.assertIsNot(copy.names, original.names)
# sort order should be copied
self.assertEqual(copy.sortorder, original.sortorder)
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
self.assertEqual([level.name for level in index.levels], list(names))
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_duplicate_names(self):
self.index.names = ['foo', 'foo']
assertRaisesRegexp(KeyError, 'Level foo not found',
self.index._get_level_number, 'foo')
def test_get_level_number_integer(self):
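# integer level names are resolved as names before positions, so with
# names=[1, 0] the name 1 maps to position 0 and vice versa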
self.index.names = [1, 0]
self.assertEqual(self.index._get_level_number(1), 0)
self.assertEqual(self.index._get_level_number(0), 1)
self.assertRaises(IndexError, self.index._get_level_number, 2)
assertRaisesRegexp(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
result = MultiIndex.from_arrays(arrays)
self.assertEqual(list(result), list(self.index))
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
self.assertTrue(result.levels[0].equals(Index([Timestamp('20130101')
])))
self.assertTrue(result.levels[1].equals(Index(['a', 'b'])))
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_product(self):
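# from_product should build the full cartesian product with the first iterable
# varying slowest, matching the equivalent from_tuples construction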
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_numpy_array_equal(result, expected)
self.assertEqual(result.names, names)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = pd.lib.list_to_object_array([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
mi = pd.MultiIndex.from_tuples(tuples)
tm.assert_numpy_array_equal(mi.values,
pd.lib.list_to_object_array(tuples))
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(mi.values[:4], mi[:4].values)
def test_append(self):
result = self.index[:3].append(self.index[3:])
self.assertTrue(result.equals(self.index))
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(self.index))
# empty
result = self.index.append([])
self.assertTrue(result.equals(self.index))
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = ['foo', 'foo', 'bar', 'baz', 'qux', 'qux']
tm.assert_numpy_array_equal(result, expected)
self.assertEqual(result.name, 'first')
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_numpy_array_equal(result, expected)
# GH 10460
index = MultiIndex(levels=[CategoricalIndex(
['A', 'B']), CategoricalIndex([1, 2, 3])], labels=[np.array(
[0, 0, 0, 1, 1, 1]), np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
self.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
self.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_na(self):
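# NaN/NaT entries must survive get_level_values for object, float and datetime
# levels, and an empty level should come back with shape (0,)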
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = [1, np.nan, 2]
tm.assert_numpy_array_equal(values.values.astype(float), expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = [np.nan, np.nan, 2]
tm.assert_numpy_array_equal(values.values.astype(float), expected)
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(0)
expected = [np.nan, np.nan, np.nan]
tm.assert_numpy_array_equal(values.values.astype(float), expected)
values = index.get_level_values(1)
expected = np.array(['a', np.nan, 1], dtype=object)
tm.assert_numpy_array_equal(values.values, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_numpy_array_equal(values.values, expected.values)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(0)
self.assertEqual(values.shape, (0, ))
def test_reorder_levels(self):
# this blows up
assertRaisesRegexp(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
self.assertEqual(self.index.nlevels, 2)
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
self.assertEqual(result, expected)
def test_legacy_pickle(self):
if PY3:
raise nose.SkipTest("testing for legacy pickles not "
"support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
self.assertTrue(obj.equals(obj2))
res = obj.get_indexer(obj)
exp = np.arange(len(obj))
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
# 0.7.3 -> 0.8.0 pickle format change
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
self.assertTrue(obj.equals(obj2))
res = obj.get_indexer(obj)
exp = np.arange(len(obj))
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equal_levels(unpickled))
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
self.assertTrue((result.values == self.index.values).all())
def test_contains(self):
self.assertIn(('foo', 'two'), self.index)
self.assertNotIn(('bar', 'two'), self.index)
self.assertNotIn(None, self.index)
def test_is_all_dates(self):
self.assertFalse(self.index.is_all_dates)
def test_is_numeric(self):
# MultiIndex is never numeric
self.assertFalse(self.index.is_numeric())
def test_getitem(self):
# scalar
self.assertEqual(self.index[2], ('bar', 'one'))
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
self.assertTrue(result.equals(expected))
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
self.assertTrue(result.equals(expected))
self.assertTrue(result2.equals(expected))
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
self.assertEqual(sorted_idx.get_loc('baz'), slice(3, 4))
self.assertEqual(sorted_idx.get_loc('foo'), slice(0, 2))
def test_get_loc(self):
self.assertEqual(self.index.get_loc(('foo', 'two')), 1)
self.assertEqual(self.index.get_loc(('baz', 'two')), 3)
self.assertRaises(KeyError, self.index.get_loc, ('bar', 'two'))
self.assertRaises(KeyError, self.index.get_loc, 'quux')
self.assertRaises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
self.assertRaises(KeyError, index.get_loc, (1, 1))
self.assertEqual(index.get_loc((2, 0)), slice(3, 5))
def test_get_loc_duplicates(self):
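# on a flat Index with duplicates, get_loc returns a slice for a contiguous run
# of the key (or a scalar position for a unique label) instead of raising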
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
self.assertEqual(result, expected)
# self.assertRaises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert (rs == xp)
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
self.assertEqual(loc, expected)
self.assertTrue(new_index.equals(exp_index))
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
self.assertEqual(loc, expected)
self.assertIsNone(new_index)
self.assertRaises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array(
[0, 0, 0, 0]), np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
self.assertEqual(result, expected)
self.assertTrue(new_index.equals(index.droplevel(0)))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
assertRaisesRegexp(TypeError, '^Level type mismatch', idx.slice_locs,
(1, 3))
assertRaisesRegexp(TypeError, '^Level type mismatch', idx.slice_locs,
df.index[5] + timedelta(seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with assertRaisesRegexp(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with assertRaisesRegexp(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
assertRaisesRegexp(KeyError, "[Kk]ey length.*greater than MultiIndex"
" lexsort depth", index.slice_locs, (1, 0, 1),
(2, 1, 0))
# works
sorted_index, _ = index.sortlevel(0)
# should there be a test case here???
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_partial(self):
sorted_idx, _ = self.index.sortlevel(0)
result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))
self.assertEqual(result, (1, 5))
result = sorted_idx.slice_locs(None, ('qux', 'one'))
self.assertEqual(result, (0, 5))
result = sorted_idx.slice_locs(('foo', 'two'), None)
self.assertEqual(result, (1, len(sorted_idx)))
result = sorted_idx.slice_locs('bar', 'baz')
self.assertEqual(result, (2, 4))
def test_slice_locs_not_contained(self):
# some searchsorted action
index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],
labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],
[0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0)
result = index.slice_locs((1, 0), (5, 2))
self.assertEqual(result, (3, 6))
result = index.slice_locs(1, 5)
self.assertEqual(result, (3, 6))
result = index.slice_locs((2, 2), (5, 2))
self.assertEqual(result, (3, 6))
result = index.slice_locs(2, 5)
self.assertEqual(result, (3, 6))
result = index.slice_locs((1, 0), (6, 3))
self.assertEqual(result, (3, 8))
result = index.slice_locs(-1, 10)
self.assertEqual(result, (0, len(index)))
def test_consistency(self):
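# first block: a very large index should construct without error; second block:
# repeated (major, minor) label pairs must make is_unique False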
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
# the fact that it works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
self.assertFalse(index.is_unique)
def test_truncate(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
result = index.truncate(before=1)
self.assertNotIn('foo', result.levels[0])
self.assertIn(1, result.levels[0])
result = index.truncate(after=1)
self.assertNotIn(2, result.levels[0])
self.assertIn(1, result.levels[0])
result = index.truncate(before=1, after=2)
self.assertEqual(len(result.levels[0]), 2)
# after < before
self.assertRaises(ValueError, index.truncate, 3, 1)
def test_get_indexer(self):
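# covers exact matching, pad/ffill and backfill/bfill fills, non-MultiIndex
# targets, and the error path when the index doing the lookup is not unique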
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, [1, 3, -1])
r1 = idx2.get_indexer(idx1, method='pad')
e1 = [-1, 0, 0, 1, 1]
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = [0, 0, 1, 1, 2]
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2._tuple_index)
rexp1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
self.assertTrue((r1 == [-1, -1, -1]).all())
# create index with duplicates
idx1 = Index(lrange(10) + lrange(10))
idx2 = Index(lrange(20))
assertRaisesRegexp(InvalidIndexError, "Reindexing only valid with"
" uniquely valued Index objects", idx1.get_indexer,
idx2)
def test_get_indexer_nearest(self):
midx = MultiIndex.from_tuples([('a', 1), ('b', 2)])
with tm.assertRaises(NotImplementedError):
midx.get_indexer(['a'], method='nearest')
with tm.assertRaises(NotImplementedError):
midx.get_indexer(['a'], method='pad', tolerance=2)
def test_format(self):
self.index.format()
self.index[:0].format()
def test_format_integer_names(self):
index = MultiIndex(levels=[[0, 1], [0, 1]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1])
index.format(names=True)
def test_format_sparse_display(self):
index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],
labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]])
result = index.format()
self.assertEqual(result[3], '1 0 0 0')
def test_format_sparse_config(self):
warn_filters = warnings.filters
warnings.filterwarnings('ignore', category=FutureWarning,
module=".*format")
# GH1538
pd.set_option('display.multi_sparse', False)
result = self.index.format()
self.assertEqual(result[1], 'foo two')
self.reset_display_options()
warnings.filters = warn_filters
def test_to_hierarchical(self):
index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
result = index.to_hierarchical(3)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
tm.assert_index_equal(result, expected)
self.assertEqual(result.names, index.names)
# K > 1
result = index.to_hierarchical(3, 2)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
tm.assert_index_equal(result, expected)
self.assertEqual(result.names, index.names)
# non-sorted
index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),
(2, 'a'), (2, 'b')],
names=['N1', 'N2'])
result = index.to_hierarchical(2)
expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'),
(1, 'b'),
(2, 'a'), (2, 'a'),
(2, 'b'), (2, 'b')],
names=['N1', 'N2'])
tm.assert_index_equal(result, expected)
self.assertEqual(result.names, index.names)
def test_bounds(self):
self.index._bounds
def test_equals(self):
self.assertTrue(self.index.equals(self.index))
self.assertTrue(self.index.equal_levels(self.index))
self.assertFalse(self.index.equals(self.index[:-1]))
self.assertTrue(self.index.equals(self.index._tuple_index))
# different number of levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
index2 = MultiIndex(levels=index.levels[:-1], labels=index.labels[:-1])
self.assertFalse(index.equals(index2))
self.assertFalse(index.equal_levels(index2))
# levels are different
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
self.assertFalse(self.index.equals(index))
self.assertFalse(self.index.equal_levels(index))
# some of the labels are different
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
self.assertFalse(self.index.equals(index))
def test_identical(self):
mi = self.index.copy()
mi2 = self.index.copy()
self.assertTrue(mi.identical(mi2))
mi = mi.set_names(['new1', 'new2'])
self.assertTrue(mi.equals(mi2))
self.assertFalse(mi.identical(mi2))
mi2 = mi2.set_names(['new1', 'new2'])
self.assertTrue(mi.identical(mi2))
mi3 = Index(mi.tolist(), names=mi.names)
mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)
self.assertTrue(mi.identical(mi3))
self.assertFalse(mi.identical(mi4))
self.assertTrue(mi.equals(mi4))
def test_is_(self):
mi = MultiIndex.from_tuples(lzip(range(10), range(10)))
self.assertTrue(mi.is_(mi))
self.assertTrue(mi.is_(mi.view()))
self.assertTrue(mi.is_(mi.view().view().view().view()))
mi2 = mi.view()
# names are metadata, they don't change id
mi2.names = ["A", "B"]
self.assertTrue(mi2.is_(mi))
self.assertTrue(mi.is_(mi2))
self.assertTrue(mi.is_(mi.set_names(["C", "D"])))
mi2 = mi.view()
mi2.set_names(["E", "F"], inplace=True)
self.assertTrue(mi.is_(mi2))
# levels are inherent properties, they change identity
mi3 = mi2.set_levels([lrange(10), lrange(10)])
self.assertFalse(mi3.is_(mi2))
# shouldn't change
self.assertTrue(mi2.is_(mi))
mi4 = mi3.view()
mi4.set_levels([[1 for _ in range(10)], lrange(10)], inplace=True)
self.assertFalse(mi4.is_(mi3))
mi5 = mi.view()
mi5.set_levels(mi5.levels, inplace=True)
self.assertFalse(mi5.is_(mi))
def test_union(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_union = piece1 | piece2
tups = sorted(self.index._tuple_index)
expected = MultiIndex.from_tuples(tups)
self.assertTrue(the_union.equals(expected))
# corner case, pass self or empty thing:
the_union = self.index.union(self.index)
self.assertIs(the_union, self.index)
the_union = self.index.union(self.index[:0])
self.assertIs(the_union, self.index)
# won't work in python 3
# tuples = self.index._tuple_index
# result = self.index[:4] | tuples[4:]
# self.assertTrue(result.equals(tuples))
# not valid for python 3
# def test_union_with_regular_index(self):
# other = Index(['A', 'B', 'C'])
# result = other.union(self.index)
# self.assertIn(('foo', 'one'), result)
# self.assertIn('B', result)
# result2 = self.index.union(other)
# self.assertTrue(result.equals(result2))
def test_intersection(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_int = piece1 & piece2
tups = sorted(self.index[3:5]._tuple_index)
expected = MultiIndex.from_tuples(tups)
self.assertTrue(the_int.equals(expected))
# corner case, pass self
the_int = self.index.intersection(self.index)
self.assertIs(the_int, self.index)
# empty intersection: disjoint
empty = self.index[:2] & self.index[2:]
expected = self.index[:0]
self.assertTrue(empty.equals(expected))
# can't do in python 3
# tuples = self.index._tuple_index
# result = self.index & tuples
# self.assertTrue(result.equals(tuples))
def test_difference(self):
first = self.index
result = first.difference(self.index[-3:])
# - API change GH 8226
with tm.assert_produces_warning():
first - self.index[-3:]
with tm.assert_produces_warning():
self.index[-3:] - first
with tm.assert_produces_warning():
self.index[-3:] - first.tolist()
self.assertRaises(TypeError, lambda: first.tolist() - self.index[-3:])
expected = MultiIndex.from_tuples(sorted(self.index[:-3].values),
sortorder=0,
names=self.index.names)
tm.assertIsInstance(result, MultiIndex)
self.assertTrue(result.equals(expected))
self.assertEqual(result.names, self.index.names)
# empty difference: reflexive
result = self.index.difference(self.index)
expected = self.index[:0]
self.assertTrue(result.equals(expected))
self.assertEqual(result.names, self.index.names)
# empty difference: superset
result = self.index[-3:].difference(self.index)
expected = self.index[:0]
self.assertTrue(result.equals(expected))
self.assertEqual(result.names, self.index.names)
# empty difference: degenerate
result = self.index[:0].difference(self.index)
expected = self.index[:0]
self.assertTrue(result.equals(expected))
self.assertEqual(result.names, self.index.names)
# names not the same
chunklet = self.index[-3:]
chunklet.names = ['foo', 'baz']
result = first.difference(chunklet)
self.assertEqual(result.names, (None, None))
# empty, but non-equal
result = self.index.difference(self.index.sortlevel(1)[0])
self.assertEqual(len(result), 0)
# raise Exception called with non-MultiIndex
result = first.difference(first._tuple_index)
self.assertTrue(result.equals(first[:0]))
# name from empty array
result = first.difference([])
self.assertTrue(first.equals(result))
self.assertEqual(first.names, result.names)
# name from non-empty array
result = first.difference([('foo', 'one')])
expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), (
'foo', 'two'), ('qux', 'one'), ('qux', 'two')])
expected.names = first.names
self.assertEqual(first.names, result.names)
assertRaisesRegexp(TypeError, "other must be a MultiIndex or a list"
" of tuples", first.difference, [1, 2, 3, 4, 5])
def test_from_tuples(self):
assertRaisesRegexp(TypeError, 'Cannot infer number of levels from'
' empty list', MultiIndex.from_tuples, [])
idx = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])
self.assertEqual(len(idx), 2)
def test_argsort(self):
result = self.index.argsort()
expected = self.index._tuple_index.argsort()
tm.assert_numpy_array_equal(result, expected)
def test_sortlevel(self):
import random
tuples = list(self.index)
random.shuffle(tuples)
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
self.assertTrue(sorted_idx.equals(expected))
sorted_idx, _ = index.sortlevel(0, ascending=False)
self.assertTrue(sorted_idx.equals(expected[::-1]))
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
self.assertTrue(sorted_idx.equals(expected))
sorted_idx, _ = index.sortlevel(1, ascending=False)
self.assertTrue(sorted_idx.equals(expected[::-1]))
def test_sortlevel_not_sort_remaining(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
sorted_idx, _ = mi.sortlevel('A', sort_remaining=False)
self.assertTrue(sorted_idx.equals(mi))
def test_sortlevel_deterministic(self):
tuples = [('bar', 'one'), ('foo', 'two'), ('qux', 'two'),
('foo', 'one'), ('baz', 'two'), ('qux', 'one')]
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
self.assertTrue(sorted_idx.equals(expected))
sorted_idx, _ = index.sortlevel(0, ascending=False)
self.assertTrue(sorted_idx.equals(expected[::-1]))
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
self.assertTrue(sorted_idx.equals(expected))
sorted_idx, _ = index.sortlevel(1, ascending=False)
self.assertTrue(sorted_idx.equals(expected[::-1]))
def test_dims(self):
pass
def test_drop(self):
dropped = self.index.drop([('foo', 'two'), ('qux', 'one')])
index = MultiIndex.from_tuples([('foo', 'two'), ('qux', 'one')])
dropped2 = self.index.drop(index)
expected = self.index[[0, 2, 3, 5]]
self.assert_index_equal(dropped, expected)
self.assert_index_equal(dropped2, expected)
dropped = self.index.drop(['bar'])
expected = self.index[[0, 1, 3, 4, 5]]
self.assert_index_equal(dropped, expected)
dropped = self.index.drop('foo')
expected = self.index[[2, 3, 4, 5]]
self.assert_index_equal(dropped, expected)
index = MultiIndex.from_tuples([('bar', 'two')])
self.assertRaises(KeyError, self.index.drop, [('bar', 'two')])
self.assertRaises(KeyError, self.index.drop, index)
self.assertRaises(KeyError, self.index.drop, ['foo', 'two'])
# partially correct argument
mixed_index = MultiIndex.from_tuples([('qux', 'one'), ('bar', 'two')])
self.assertRaises(KeyError, self.index.drop, mixed_index)
# error='ignore'
dropped = self.index.drop(index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 4, 5]]
self.assert_index_equal(dropped, expected)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 5]]
self.assert_index_equal(dropped, expected)
dropped = self.index.drop(['foo', 'two'], errors='ignore')
expected = self.index[[2, 3, 4, 5]]
self.assert_index_equal(dropped, expected)
# mixed partial / full drop
dropped = self.index.drop(['foo', ('qux', 'one')])
expected = self.index[[2, 3, 5]]
self.assert_index_equal(dropped, expected)
# mixed partial / full drop / error='ignore'
mixed_index = ['foo', ('qux', 'one'), 'two']
self.assertRaises(KeyError, self.index.drop, mixed_index)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[2, 3, 5]]
self.assert_index_equal(dropped, expected)
def test_droplevel_with_names(self):
index = self.index[self.index.get_loc('foo')]
dropped = index.droplevel(0)
self.assertEqual(dropped.name, 'second')
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index.droplevel(0)
self.assertEqual(dropped.names, ('two', 'three'))
dropped = index.droplevel('two')
expected = index.droplevel(1)
self.assertTrue(dropped.equals(expected))
def test_droplevel_multiple(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index[:2].droplevel(['three', 'one'])
expected = index[:2].droplevel(2).droplevel(0)
self.assertTrue(dropped.equals(expected))
def test_drop_not_lexsorted(self):
# GH 12078
# define the lexsorted version of the multi-index
tuples = [('a', ''), ('b1', 'c1'), ('b2', 'c2')]
lexsorted_mi = MultiIndex.from_tuples(tuples, names=['b', 'c'])
self.assertTrue(lexsorted_mi.is_lexsorted())
# and the not-lexsorted version
df = pd.DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3], [1, 'b2', 'c2', 4]])
df = df.pivot_table(index='a', columns=['b', 'c'], values='d')
df = df.reset_index()
not_lexsorted_mi = df.columns
self.assertFalse(not_lexsorted_mi.is_lexsorted())
# compare the results
self.assert_index_equal(lexsorted_mi, not_lexsorted_mi)
with self.assert_produces_warning(PerformanceWarning):
self.assert_index_equal(lexsorted_mi.drop('a'),
not_lexsorted_mi.drop('a'))
def test_insert(self):
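# insert should keep existing levels when the key is already present and grow
# them for unseen values; the DataFrame/Series part exercises enlargement via .loc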
# key contained in all levels
new_index = self.index.insert(0, ('bar', 'two'))
self.assertTrue(new_index.equal_levels(self.index))
self.assertEqual(new_index[0], ('bar', 'two'))
# key not contained in all levels
new_index = self.index.insert(0, ('abc', 'three'))
tm.assert_numpy_array_equal(new_index.levels[0],
list(self.index.levels[0]) + ['abc'])
tm.assert_numpy_array_equal(new_index.levels[1],
list(self.index.levels[1]) + ['three'])
self.assertEqual(new_index[0], ('abc', 'three'))
# key wrong length
assertRaisesRegexp(ValueError, "Item must have length equal to number"
" of levels", self.index.insert, 0, ('foo2', ))
left = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1]],
columns=['1st', '2nd', '3rd'])
left.set_index(['1st', '2nd'], inplace=True)
ts = left['3rd'].copy(deep=True)
left.loc[('b', 'x'), '3rd'] = 2
left.loc[('b', 'a'), '3rd'] = -1
left.loc[('b', 'b'), '3rd'] = 3
left.loc[('a', 'x'), '3rd'] = 4
left.loc[('a', 'w'), '3rd'] = 5
left.loc[('a', 'a'), '3rd'] = 6
ts.loc[('b', 'x')] = 2
ts.loc['b', 'a'] = -1
ts.loc[('b', 'b')] = 3
ts.loc['a', 'x'] = 4
ts.loc[('a', 'w')] = 5
ts.loc['a', 'a'] = 6
right = pd.DataFrame([['a', 'b', 0],
['b', 'd', 1],
['b', 'x', 2],
['b', 'a', -1],
['b', 'b', 3],
['a', 'x', 4],
['a', 'w', 5],
['a', 'a', 6]],
columns=['1st', '2nd', '3rd'])
right.set_index(['1st', '2nd'], inplace=True)
# FIXME: data types change to float because of intermediate nan insertion
tm.assert_frame_equal(left, right, check_dtype=False)
tm.assert_series_equal(ts, right['3rd'])
# GH9250
idx = [('test1', i) for i in range(5)] + \
[('test2', i) for i in range(6)] + \
[('test', 17), ('test', 18)]
left = pd.Series(np.linspace(0, 10, 11),
pd.MultiIndex.from_tuples(idx[:-2]))
left.loc[('test', 17)] = 11
left.ix[('test', 18)] = 12
right = pd.Series(np.linspace(0, 12, 13),
pd.MultiIndex.from_tuples(idx))
tm.assert_series_equal(left, right)
def test_take_preserve_name(self):
taken = self.index.take([3, 0, 1])
self.assertEqual(taken.names, self.index.names)
def test_take_fill_value(self):
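# with fill_value=True a -1 indexer maps to (NaN, NaT); with allow_fill=False it
# indexes from the end as usual, and indices below -1 must raise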
# GH 12631
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
result = idx.take(np.array([1, 0, -1]))
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
(np.nan, pd.NaT)]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assertRaisesRegexp(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assertRaisesRegexp(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with tm.assertRaises(IndexError):
idx.take(np.array([1, -5]))
def test_take_invalid_kwargs(self):
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
indices = [1, 2]
msg = "take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_join_level(self):
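# joining a flat Index against the named 'second' level should follow a
# level-wise join; for outer/inner joins the result must not depend on call order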
def _check_how(other, how):
join_index, lidx, ridx = other.join(self.index, how=how,
level='second',
return_indexers=True)
exp_level = other.join(self.index.levels[1], how=how)
self.assertTrue(join_index.levels[0].equals(self.index.levels[0]))
self.assertTrue(join_index.levels[1].equals(exp_level))
# pare down levels
mask = np.array(
[x[1] in exp_level for x in self.index], dtype=bool)
exp_values = self.index.values[mask]
tm.assert_numpy_array_equal(join_index.values, exp_values)
if how in ('outer', 'inner'):
join_index2, ridx2, lidx2 = \
self.index.join(other, how=how, level='second',
return_indexers=True)
self.assertTrue(join_index.equals(join_index2))
tm.assert_numpy_array_equal(lidx, lidx2)
tm.assert_numpy_array_equal(ridx, ridx2)
tm.assert_numpy_array_equal(join_index2.values, exp_values)
def _check_all(other):
_check_how(other, 'outer')
_check_how(other, 'inner')
_check_how(other, 'left')
_check_how(other, 'right')
_check_all(Index(['three', 'one', 'two']))
_check_all(Index(['one']))
_check_all(Index(['one', 'three']))
# some corner cases
idx = Index(['three', 'one', 'two'])
result = idx.join(self.index, level='second')
tm.assertIsInstance(result, MultiIndex)
assertRaisesRegexp(TypeError, "Join.*MultiIndex.*ambiguous",
self.index.join, self.index, level=1)
def test_join_self(self):
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
res = self.index
joined = res.join(res, how=kind)
self.assertIs(res, joined)
def test_join_multi(self):
# GH 10665
midx = pd.MultiIndex.from_product(
[np.arange(4), np.arange(4)], names=['a', 'b'])
idx = pd.Index([1, 2, 5], name='b')
# inner
jidx, lidx, ridx = midx.join(idx, how='inner', return_indexers=True)
exp_idx = pd.MultiIndex.from_product(
[np.arange(4), [1, 2]], names=['a', 'b'])
exp_lidx = np.array([1, 2, 5, 6, 9, 10, 13, 14])
exp_ridx = np.array([0, 1, 0, 1, 0, 1, 0, 1])
self.assert_index_equal(jidx, exp_idx)
self.assert_numpy_array_equal(lidx, exp_lidx)
self.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='inner', return_indexers=True)
self.assert_index_equal(jidx, exp_idx)
self.assert_numpy_array_equal(lidx, exp_lidx)
self.assert_numpy_array_equal(ridx, exp_ridx)
# keep MultiIndex
jidx, lidx, ridx = midx.join(idx, how='left', return_indexers=True)
exp_ridx = np.array([-1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1, -1, 0,
1, -1])
self.assert_index_equal(jidx, midx)
self.assertIsNone(lidx)
self.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='right', return_indexers=True)
self.assert_index_equal(jidx, midx)
self.assertIsNone(lidx)
self.assert_numpy_array_equal(ridx, exp_ridx)
def test_reindex(self):
result, indexer = self.index.reindex(list(self.index[:4]))
tm.assertIsInstance(result, MultiIndex)
self.check_level_names(result, self.index[:4].names)
result, indexer = self.index.reindex(list(self.index))
tm.assertIsInstance(result, MultiIndex)
self.assertIsNone(indexer)
self.check_level_names(result, self.index.names)
def test_reindex_level(self):
idx = Index(['one'])
target, indexer = self.index.reindex(idx, level='second')
target2, indexer2 = idx.reindex(self.index, level='second')
exp_index = self.index.join(idx, level='second', how='right')
exp_index2 = self.index.join(idx, level='second', how='left')
self.assertTrue(target.equals(exp_index))
exp_indexer = np.array([0, 2, 4])
tm.assert_numpy_array_equal(indexer, exp_indexer)
self.assertTrue(target2.equals(exp_index2))
exp_indexer2 = np.array([0, -1, 0, -1, 0, -1])
tm.assert_numpy_array_equal(indexer2, exp_indexer2)
assertRaisesRegexp(TypeError, "Fill method not supported",
self.index.reindex, self.index, method='pad',
level='second')
assertRaisesRegexp(TypeError, "Fill method not supported", idx.reindex,
idx, method='bfill', level='first')
def test_duplicates(self):
self.assertFalse(self.index.has_duplicates)
self.assertTrue(self.index.append(self.index).has_duplicates)
index = MultiIndex(levels=[[0, 1], [0, 1, 2]], labels=[
[0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]])
self.assertTrue(index.has_duplicates)
# GH 9075
t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169),
(u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119),
(u('x'), u('out'), u('z'), 9, u('y'), u('in'), u('z'), 135),
(u('x'), u('out'), u('z'), 13, u('y'), u('in'), u('z'), 145),
(u('x'), u('out'), u('z'), 14, u('y'), u('in'), u('z'), 158),
(u('x'), u('out'), u('z'), 16, u('y'), u('in'), u('z'), 122),
(u('x'), u('out'), u('z'), 17, u('y'), u('in'), u('z'), 160),
(u('x'), u('out'), u('z'), 18, u('y'), u('in'), u('z'), 180),
(u('x'), u('out'), u('z'), 20, u('y'), u('in'), u('z'), 143),
(u('x'), u('out'), u('z'), 21, u('y'), u('in'), u('z'), 128),
(u('x'), u('out'), u('z'), 22, u('y'), u('in'), u('z'), 129),
(u('x'), u('out'), u('z'), 25, u('y'), u('in'), u('z'), 111),
(u('x'), u('out'), u('z'), 28, u('y'), u('in'), u('z'), 114),
(u('x'), u('out'), u('z'), 29, u('y'), u('in'), u('z'), 121),
(u('x'), u('out'), u('z'), 31, u('y'), u('in'), u('z'), 126),
(u('x'), u('out'), u('z'), 32, u('y'), u('in'), u('z'), 155),
(u('x'), u('out'), u('z'), 33, u('y'), u('in'), u('z'), 123),
(u('x'), u('out'), u('z'), 12, u('y'), u('in'), u('z'), 144)]
index = pd.MultiIndex.from_tuples(t)
self.assertFalse(index.has_duplicates)
# handle int64 overflow if possible
def check(nlevels, with_nulls):
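# helper: build an (nlevels + 1)-level index with no duplicate rows (optionally
# containing -1 null labels), then inject one duplicate row and check detection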
labels = np.tile(np.arange(500), 2)
level = np.arange(500)
if with_nulls: # inject some null values
labels[500] = -1 # common nan value
labels = list(labels.copy() for i in range(nlevels))
for i in range(nlevels):
labels[i][500 + i - nlevels // 2] = -1
labels += [np.array([-1, 1]).repeat(500)]
else:
labels = [labels] * nlevels + [np.arange(2).repeat(500)]
levels = [level] * nlevels + [[0, 1]]
# no dups
index = MultiIndex(levels=levels, labels=labels)
self.assertFalse(index.has_duplicates)
# with a dup
if with_nulls:
f = lambda a: np.insert(a, 1000, a[0])
labels = list(map(f, labels))
index = MultiIndex(levels=levels, labels=labels)
else:
values = index.values.tolist()
index = MultiIndex.from_tuples(values + [values[0]])
self.assertTrue(index.has_duplicates)
# no overflow
check(4, False)
check(4, True)
# overflow possible
check(8, False)
check(8, True)
# GH 9125
n, k = 200, 5000
levels = [np.arange(n), tm.makeStringIndex(n), 1000 + np.arange(n)]
labels = [np.random.choice(n, k * n) for lev in levels]
mi = MultiIndex(levels=levels, labels=labels)
for keep in ['first', 'last', False]:
left = mi.duplicated(keep=keep)
right = pd.lib.duplicated(mi.values, keep=keep)
tm.assert_numpy_array_equal(left, right)
# GH5873
for a in [101, 102]:
mi = MultiIndex.from_arrays([[101, a], [3.5, np.nan]])
self.assertFalse(mi.has_duplicates)
self.assertEqual(mi.get_duplicates(), [])
tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(
2, dtype='bool'))
for n in range(1, 6): # 1st level shape
for m in range(1, 5): # 2nd level shape
# all possible unique combinations, including nan
lab = product(range(-1, n), range(-1, m))
mi = MultiIndex(levels=[list('abcde')[:n], list('WXYZ')[:m]],
labels=np.random.permutation(list(lab)).T)
self.assertEqual(len(mi), (n + 1) * (m + 1))
self.assertFalse(mi.has_duplicates)
self.assertEqual(mi.get_duplicates(), [])
tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(
len(mi), dtype='bool'))
def test_duplicate_meta_data(self):
# GH 10115
index = MultiIndex(levels=[[0, 1], [0, 1, 2]], labels=[
[0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]])
for idx in [index,
index.set_names([None, None]),
index.set_names([None, 'Num']),
index.set_names(['Upper', 'Num']), ]:
self.assertTrue(idx.has_duplicates)
self.assertEqual(idx.drop_duplicates().names, idx.names)
def test_tolist(self):
result = self.index.tolist()
exp = list(self.index.values)
self.assertEqual(result, exp)
def test_repr_with_unicode_data(self):
with pd.core.config.option_context("display.encoding", 'UTF-8'):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
index = pd.DataFrame(d).set_index(["a", "b"]).index
self.assertFalse("\\u" in repr(index)
) # we don't want unicode-escaped
def test_repr_roundtrip(self):
mi = MultiIndex.from_product([list('ab'), range(3)],
names=['first', 'second'])
str(mi)
if PY3:
tm.assert_index_equal(eval(repr(mi)), mi, exact=True)
else:
result = eval(repr(mi))
# string coerces to unicode
tm.assert_index_equal(result, mi, exact=False)
self.assertEqual(
mi.get_level_values('first').inferred_type, 'string')
self.assertEqual(
result.get_level_values('first').inferred_type, 'unicode')
mi_u = MultiIndex.from_product(
[list(u'ab'), range(3)], names=['first', 'second'])
result = eval(repr(mi_u))
tm.assert_index_equal(result, mi_u, exact=True)
# formatting
if PY3:
str(mi)
else:
compat.text_type(mi)
# long format
mi = MultiIndex.from_product([list('abcdefg'), range(10)],
names=['first', 'second'])
result = str(mi)
if PY3:
tm.assert_index_equal(eval(repr(mi)), mi, exact=True)
else:
result = eval(repr(mi))
# string coerces to unicode
tm.assert_index_equal(result, mi, exact=False)
self.assertEqual(
mi.get_level_values('first').inferred_type, 'string')
self.assertEqual(
result.get_level_values('first').inferred_type, 'unicode')
mi_u = MultiIndex.from_product(
[list(u'abcdefg'), range(10)], names=['first', 'second'])
result = eval(repr(mi_u))
tm.assert_index_equal(result, mi_u, exact=True)
def test_str(self):
# tested elsewhere
pass
def test_unicode_string_with_unicode(self):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
idx = pd.DataFrame(d).set_index(["a", "b"]).index
if PY3:
str(idx)
else:
compat.text_type(idx)
def test_bytestring_with_unicode(self):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
idx = pd.DataFrame(d).set_index(["a", "b"]).index
if PY3:
bytes(idx)
else:
str(idx)
def test_slice_keep_name(self):
x = MultiIndex.from_tuples([('a', 'b'), (1, 2), ('c', 'd')],
names=['x', 'y'])
self.assertEqual(x[1:].names, x.names)
def test_isnull_behavior(self):
# should not segfault GH5123
# NOTE: if MI representation changes, may make sense to allow
# isnull(MI)
with tm.assertRaises(NotImplementedError):
pd.isnull(self.index)
def test_level_setting_resets_attributes(self):
ind = MultiIndex.from_arrays([
['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3]
])
assert ind.is_monotonic
ind.set_levels([['A', 'B', 'A', 'A', 'B'], [2, 1, 3, -2, 5]],
inplace=True)
# if this fails, probably didn't reset the cache correctly.
assert not ind.is_monotonic
def test_isin(self):
values = [('foo', 2), ('bar', 3), ('quux', 4)]
idx = MultiIndex.from_arrays([['qux', 'baz', 'foo', 'bar'], np.arange(
4)])
result = idx.isin(values)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = MultiIndex.from_arrays([[], []])
result = idx.isin(values)
self.assertEqual(len(result), 0)
self.assertEqual(result.dtype, np.bool_)
def test_isin_nan(self):
idx = MultiIndex.from_arrays([['foo', 'bar'], [1.0, np.nan]])
tm.assert_numpy_array_equal(idx.isin([('bar', np.nan)]),
[False, False])
tm.assert_numpy_array_equal(idx.isin([('bar', float('nan'))]),
[False, False])
def test_isin_level_kwarg(self):
idx = MultiIndex.from_arrays([['qux', 'baz', 'foo', 'bar'], np.arange(
4)])
vals_0 = ['foo', 'bar', 'quux']
vals_1 = [2, 3, 10]
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level=0))
tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level=-2))
tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level=1))
tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level=-1))
self.assertRaises(IndexError, idx.isin, vals_0, level=5)
self.assertRaises(IndexError, idx.isin, vals_0, level=-5)
self.assertRaises(KeyError, idx.isin, vals_0, level=1.0)
self.assertRaises(KeyError, idx.isin, vals_1, level=-1.0)
self.assertRaises(KeyError, idx.isin, vals_1, level='A')
idx.names = ['A', 'B']
tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level='A'))
tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level='B'))
self.assertRaises(KeyError, idx.isin, vals_1, level='C')
def test_reindex_preserves_names_when_target_is_list_or_ndarray(self):
# GH6552
idx = self.index.copy()
target = idx.copy()
idx.names = target.names = [None, None]
other_dtype = pd.MultiIndex.from_product([[1, 2], [3, 4]])
# list & ndarray cases
self.assertEqual(idx.reindex([])[0].names, [None, None])
self.assertEqual(idx.reindex(np.array([]))[0].names, [None, None])
self.assertEqual(idx.reindex(target.tolist())[0].names, [None, None])
self.assertEqual(idx.reindex(target.values)[0].names, [None, None])
self.assertEqual(
idx.reindex(other_dtype.tolist())[0].names, [None, None])
self.assertEqual(
idx.reindex(other_dtype.values)[0].names, [None, None])
idx.names = ['foo', 'bar']
self.assertEqual(idx.reindex([])[0].names, ['foo', 'bar'])
self.assertEqual(idx.reindex(np.array([]))[0].names, ['foo', 'bar'])
self.assertEqual(idx.reindex(target.tolist())[0].names, ['foo', 'bar'])
self.assertEqual(idx.reindex(target.values)[0].names, ['foo', 'bar'])
self.assertEqual(
idx.reindex(other_dtype.tolist())[0].names, ['foo', 'bar'])
self.assertEqual(
idx.reindex(other_dtype.values)[0].names, ['foo', 'bar'])
def test_reindex_lvl_preserves_names_when_target_is_list_or_array(self):
# GH7774
idx = pd.MultiIndex.from_product([[0, 1], ['a', 'b']],
names=['foo', 'bar'])
self.assertEqual(idx.reindex([], level=0)[0].names, ['foo', 'bar'])
self.assertEqual(idx.reindex([], level=1)[0].names, ['foo', 'bar'])
def test_reindex_lvl_preserves_type_if_target_is_empty_list_or_array(self):
# GH7774
idx = pd.MultiIndex.from_product([[0, 1], ['a', 'b']])
self.assertEqual(idx.reindex([], level=0)[0].levels[0].dtype.type,
np.int64)
self.assertEqual(idx.reindex([], level=1)[0].levels[1].dtype.type,
np.object_)
def test_groupby(self):
groups = self.index.groupby(np.array([1, 1, 1, 2, 2, 2]))
labels = self.index.get_values().tolist()
exp = {1: labels[:3], 2: labels[3:]}
tm.assert_dict_equal(groups, exp)
# GH5620
groups = self.index.groupby(self.index)
exp = dict((key, [key]) for key in self.index)
tm.assert_dict_equal(groups, exp)
def test_index_name_retained(self):
# GH9857
result = pd.DataFrame({'x': [1, 2, 6],
'y': [2, 2, 8],
'z': [-5, 0, 5]})
result = result.set_index('z')
result.loc[10] = [9, 10]
df_expected = pd.DataFrame({'x': [1, 2, 6, 9],
'y': [2, 2, 8, 10],
'z': [-5, 0, 5, 10]})
df_expected = df_expected.set_index('z')
tm.assert_frame_equal(result, df_expected)
def test_equals_operator(self):
# GH9785
self.assertTrue((self.index == self.index).all())
def test_large_multiindex_error(self):
# GH12527
df_below_1000000 = pd.DataFrame(
1, index=pd.MultiIndex.from_product([[1, 2], range(499999)]),
columns=['dest'])
with assertRaises(KeyError):
df_below_1000000.loc[(-1, 0), 'dest']
with assertRaises(KeyError):
df_below_1000000.loc[(3, 0), 'dest']
df_above_1000000 = pd.DataFrame(
1, index=pd.MultiIndex.from_product([[1, 2], range(500001)]),
columns=['dest'])
with assertRaises(KeyError):
df_above_1000000.loc[(-1, 0), 'dest']
with assertRaises(KeyError):
df_above_1000000.loc[(3, 0), 'dest']
def test_partial_string_timestamp_multiindex(self):
# GH10331
dr = pd.date_range('2016-01-01', '2016-01-03', freq='12H')
abc = ['a', 'b', 'c']
ix = pd.MultiIndex.from_product([dr, abc])
df = pd.DataFrame({'c1': range(0, 15)}, index=ix)
idx = pd.IndexSlice
# c1
# 2016-01-01 00:00:00 a 0
# b 1
# c 2
# 2016-01-01 12:00:00 a 3
# b 4
# c 5
# 2016-01-02 00:00:00 a 6
# b 7
# c 8
# 2016-01-02 12:00:00 a 9
# b 10
# c 11
# 2016-01-03 00:00:00 a 12
# b 13
# c 14
# partial string matching on a single index
for df_swap in (df.swaplevel(),
df.swaplevel(0),
df.swaplevel(0, 1)):
df_swap = df_swap.sort_index()
just_a = df_swap.loc['a']
result = just_a.loc['2016-01-01']
expected = df.loc[idx[:, 'a'], :].iloc[0:2]
expected.index = expected.index.droplevel(1)
tm.assert_frame_equal(result, expected)
# indexing with IndexSlice
result = df.loc[idx['2016-01-01':'2016-02-01', :], :]
expected = df
tm.assert_frame_equal(result, expected)
# match on secondary index
result = df_swap.loc[idx[:, '2016-01-01':'2016-01-01'], :]
expected = df_swap.iloc[[0, 1, 5, 6, 10, 11]]
tm.assert_frame_equal(result, expected)
# Even though this syntax works on a single index, this is somewhat
# ambiguous and we don't want to extend this behavior forward to work
# in multi-indexes. This would amount to selecting a scalar from a
# column.
with assertRaises(KeyError):
df['2016-01-01']
# partial string match on year only
result = df.loc['2016']
expected = df
tm.assert_frame_equal(result, expected)
# partial string match on date
result = df.loc['2016-01-01']
expected = df.iloc[0:6]
tm.assert_frame_equal(result, expected)
# partial string match on date and hour, from middle
result = df.loc['2016-01-02 12']
expected = df.iloc[9:12]
tm.assert_frame_equal(result, expected)
# partial string match on secondary index
result = df_swap.loc[idx[:, '2016-01-02'], :]
expected = df_swap.iloc[[2, 3, 7, 8, 12, 13]]
tm.assert_frame_equal(result, expected)
# tuple selector with partial string match on date
result = df.loc[('2016-01-01', 'a'), :]
expected = df.iloc[[0, 3]]
tm.assert_frame_equal(result, expected)
# Slicing date on first level should break (of course)
with assertRaises(KeyError):
df_swap.loc['2016-01-01']
def test_rangeindex_fallback_coercion_bug(self):
# GH 12893
foo = pd.DataFrame(np.arange(100).reshape((10, 10)))
bar = pd.DataFrame(np.arange(100).reshape((10, 10)))
df = pd.concat({'foo': foo.stack(), 'bar': bar.stack()}, axis=1)
df.index.names = ['fizz', 'buzz']
str(df)
expected = pd.DataFrame({'bar': np.arange(100),
'foo': np.arange(100)},
index=pd.MultiIndex.from_product(
[range(10), range(10)],
names=['fizz', 'buzz']))
tm.assert_frame_equal(df, expected, check_like=True)
result = df.index.get_level_values('fizz')
expected = pd.Int64Index(np.arange(10), name='fizz').repeat(10)
tm.assert_index_equal(result, expected)
result = df.index.get_level_values('buzz')
expected = pd.Int64Index(np.tile(np.arange(10), 10), name='buzz')
tm.assert_index_equal(result, expected)
| mit |
KarlClinckspoor/SAXS_treatment | ESRF data treatment/Find_time_between_runs.py | 1 | 14049 | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 31 09:57:40 2017
@author: Karl Jan Clinckspoor
[email protected] [email protected]
Made at iNANO at Aarhus University
In a collaboration project with the University of Campinas.
Last modified: 06/09/2017
"""
# todo: ask the user if they want to consider the time to be at half the live time, or at the full live time.
import glob
import sys
import re
import matplotlib.pyplot as plt
import textwrap
def CalculateTimeDifferences(frames, dead1, dead_start, dead_factor, live, live_factor, mixing_time=0):
"""This function simulates the ccdmcal function from the SAXS line.
The term mixing_time is subtracted from the first frame in order to account for experimental details.
In the ccdmcal function, this is not taken into consideration, so this won't do so by default.
    Alternatively, it may be easier to pass dead1 with the mixing time already subtracted, removing the need for mixing_time.
    For example: t KBr = 4 ms and live = 10 ms give first_frame = 4 + 10 = 14 ms.
Also, some consider that the time of frame x should be taken at half the live time:
For example: t KBr = 4ms, first_frame = 4 + live/2 = 4 + 5 = 9ms.
It depends on your definition. This function does not consider the half times.
"""
first_frame = dead1 + live - mixing_time
times = [first_frame]
times_sum = [first_frame]
for i in range(1, frames, 1):
this_frame = dead_start * dead_factor ** (i - 1) + live * live_factor ** (i - 1)
times.append(this_frame)
times_sum.append(times_sum[i - 1] + this_frame)
return times, times_sum
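# Illustrative check of the formula above (the parameter values are assumptions,
# not taken from a real experiment): with dead1=0.02 s, dead_start=0.05 s,
# dead_factor=1.0, live=0.1 s and live_factor=1.0, three frames give per-frame
# times of 0.12, 0.15, 0.15 s and cumulative times of 0.12, 0.27, 0.42 s:
#     times, times_sum = CalculateTimeDifferences(3, 0.02, 0.05, 1.0, 0.1, 1.0)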
def PrintCalculatedTimeFrames(times, times_sum):
"""Gets the times and prints them in an orderly fashion."""
length = len(times)
space = ' ' * 5
for frame in range(1, length + 1, 1):
textblock1 = space + 'Frame %d:\n' % frame
textblock2 = space + 'Dead+Live time: ' + str(round(times[frame - 1], 3) * 1000) + 'ms\n'
textblock3 = space + 'Total exp. time: ' + str(round(times_sum[frame - 1], 3) * 1000) + 'ms\n'
print(textblock1, textblock2, textblock3)
def ExtractFromTimestamp(file):
"""Read the whole experimental file and extracts from the header line the first timestamp.
If the regex is unable to find the time, it will search again for something less specific.
Returns the minutes and seconds, with milisseconds, as floats."""
wholefile = open(file, 'r').read()
# time = re.findall('q\[nm-1\]\s+time=\d\d\d\d-\d\d-\d\dT\d\d:(\d\d):(\d\d\.\d\d\d\d\d\d)', wholefile)
time = re.findall(r'time=\d{4}-\d\d-\d\dT\d\d:(\d\d):(\d\d\.\d{5})', wholefile)
if len(time) == 0:
time = re.findall(r'.*(\d\d):(\d\d\.\d\d\d\d\d\d)', wholefile)
minutes, seconds = float(time[0][0]), float(time[0][1])
return minutes, seconds
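# Illustrative example (the header line below is an assumption about the file
# format, not a real file): a line containing 'time=2017-08-31T09:57:40.123456'
# yields minutes=57.0 and seconds=40.12345 (the primary pattern keeps five
# decimal places).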
def FindTimesForEachExperiment(filelist, initial_deadtime, initial_livetime):
"""From a list of files of the same experiment, extracts the times for each file and then computes the time \
differences between each each file.
It first extracts the timestamps for the first file on the list and uses that as the basis for subsequent curves.
Does not consider experiments that take more than 60 minutes to complete.
Returns a list of all the times in seconds."""
times = []
for i, file in enumerate(filelist):
if i == 0:
initial_minutes, initial_seconds = ExtractFromTimestamp(file)
minutes, seconds = ExtractFromTimestamp(file)
totalseconds = ((minutes - initial_minutes) * 60 + (seconds - initial_seconds)
+ initial_deadtime + initial_livetime)
times.append(totalseconds)
return times
# todo: finish this function
def QuietFindTimes(expnumber, initial_deadtime, initial_livetime):
pass
# todo: This should be removed, a bit unnecessary. (exp, exp_times)
def PrintTimes(experiment, times):
print('---Times for %s---' % experiment)
for time in times:
print(time, 'ms')
# %%
def PrintAndCompareTimes():
"""Finds the files, asks for the user to input the factors used in the calculations, extracts the data from the \
files and then compares the differences with the calculated times."""
while True:
experiments = FindFiles()
if experiments is not None:
break
print('Retrying....')
initial_deadtime = float(input('What is the initial deadtime used for this experiment? (in seconds)\n'))
dead_start = float(input('What is dead_start, the deadtime after the first one? (in seconds)\n'))
dead_factor = float(input('What is dead factor?\n'))
initial_livetime = float(input('What is the initial livetime used for this experiment ? (in seconds)\n'))
live_factor = float(input('What is the live factor?\n'))
calculated_times, calculated_timesum = CalculateTimeDifferences(len(experiments), initial_deadtime, dead_start,
dead_factor, initial_livetime, live_factor)
times = FindTimesForEachExperiment(experiments, initial_deadtime, initial_livetime)
print('---From timestamps --- Calculated --- Difference')
for time, calc in zip(times, calculated_timesum):
print(round(time, 3) * 1000, round(calc, 3) * 1000, round(time - calc, 3) * 1000)
# %%
def WriteTimes(expnumber, times):
with open(expnumber + '.txt', 'w') as fhand:
for time in times:
fhand.write(str(round(time, 4) * 1000) + '\n')
# %%
def WriteTimesWithStep(expnumber, times):
with open(expnumber + '.txt', 'w') as fhand:
for index, time in enumerate(times):
if index == len(times):
fhand.write(str(round(time, 4) * 1000) + '\n')
return
try:
step = round(times[index + 1] - time, 4) * 1000
except:
step = 0
fhand.write(str(round(time, 4) * 1000) + ' ' + str(step) + '\n')
# %%
def FindFiles():
expnumber = input("What is the experimental number of the run you wish to find the times between each curve? "
"(1-5 digit number, or quit) \n")
if expnumber.lower() == 'quit' or expnumber == '':
sys.exit()
if not expnumber.isdecimal() and expnumber != 'all':
print('invalid experiment number: ', expnumber)
return None
if expnumber.isdecimal():
expnumber = expnumber.zfill(5)
experiments = glob.glob('*%s*.dat' % expnumber)
print('Found', len(experiments), 'files for that experiment.')
if len(experiments) == 0:
print('Oops. No experiment found.')
return None
return experiments
# Todo
def Help():
text = 'This is a script made in order to automatically calculate the timing between each frame \
obtained from the ID02 SAXS line in ESRF, Grenoble. This is a pretty reduced scope for a project, \
but this code can easily be edited to suit another purpose.\n'
text2 = 'The formula to calculate the timing is this:\n'
text3 = 'Frame 1: dead1 + live\n'
text4 = 'Frame 2-end: previous frame + dead_start*dead_factor^(frame number -2) + \
live*live_factor^(frame number -2)\n'
helptext = textwrap.wrap((text + text2 + text3 + text4), width=70)
print('-' * 70)
for line in helptext:
print(line)
print('-' * 70)
# %%
def mainmenu():
"""Main program menu, wraps up all the functions into one."""
print('##################################################################')
print('What do you want to do? (C)alculate the times based on a set of parameters, find the times for (all) files '
'in this folder, find the times for a few select experiments (default), (quit), or do you need (help)?')
print('##################################################################')
choice = input('Choice: ')
if choice == 'all':
print('\nWARNING: Make sure that, before you continue, all the files in the present folder contain solely the '
'kinetic data, in the following naming convention: *_ExpNumber_FrameNumber_*.dat, for example: '
'sc1470_saxs_03316_0001_var.dat')
do_write = input('Do you want to write all the data to external files? y/n:')
do_print = input('Do you want to print all the times for each file? y/n:')
do_step = 'n'
if do_write != 'n':
do_step = input('Do you want to write the steps on the textfile? y/(n)')
initial_deadtime = float(input('What is the initial deadtime used for this experiment? (in seconds)\n'))
initial_livetime = float(input('What is the initial livetime used for this experiment ? (in seconds)\n'))
all_files = glob.glob('*.dat')
expnumbers = []
for file in all_files:
expnumber = file.split('_')[2]
if expnumber not in expnumbers:
expnumbers.append(expnumber)
for exp in expnumbers:
exp_files = glob.glob('*%s*' % exp)
# ideally I would do this in the initial list, instead of reapplying glob, but this way is easier for now.
exp_times = FindTimesForEachExperiment(exp_files, initial_deadtime, initial_livetime)
if do_write == 'y' and do_step != 'y':
WriteTimes(exp, exp_times)
if do_write == 'y' and do_step == 'y':
WriteTimesWithStep(exp, exp_times)
if do_print == 'y':
PrintTimes(exp, exp_times)
print('Finished.')
return False
if choice == 'C':
frames = int(input('Number of frames: '))
dead1 = float(input('Dead1: '))
dead_start = float(input('Dead Start: '))
dead_factor = float(input('Dead factor: '))
live = float(input('Live time: '))
live_factor = float(input('Live factor: '))
times, times_sum = CalculateTimeDifferences(frames, dead1, dead_start, dead_factor, live, live_factor)
PrintCalculatedTimeFrames(times, times_sum)
return True
if choice == 'help':
Help()
return True
if choice == 'quit':
return False
counter = 0
while True: # todo: Finish
do_change_times = input('Are there different deadtimes for each experiment? If so, you will be prompted after '
'every file what the new parameters are. Y/(n)\n')
do_write = input('Do you want to write the data to external files? (y)/n')
do_print = input('Do you want to print the times for each file? y/(n)')
if do_write != 'n':
do_step = input('Do you want to write the steps on the textfile? y/(n)')
experiments = FindFiles()
if do_change_times == 'y':
            initial_deadtime = float(input('What is the initial deadtime, in seconds? '))
            initial_livetime = float(input('What is the initial livetime, in seconds? '))
            exp_times = FindTimesForEachExperiment(experiments, initial_deadtime, initial_livetime)
        if counter == 0:
            initial_deadtime = float(input('What is the initial deadtime, in seconds? '))
            initial_livetime = float(input('What is the initial livetime, in seconds? '))
            counter += 1
            exp_times = FindTimesForEachExperiment(experiments, initial_deadtime, initial_livetime)
# %%
if __name__ == '__main__':
print('This script is used to find the times between each experiment')
continue_working = True
while continue_working:
continue_working = mainmenu()
print('Bye!')
def FindTimes_(): # Deprecated
"""Initial function that was developed. It is massive and is self-contained."""
expnumber = input("What is the experimental number of the run you wish to find the times between each curve? "
"(number, all or quit) \n")
# expnumber = '3607'
if expnumber.lower() == 'quit' or expnumber == '':
sys.exit()
if not expnumber.isdecimal() and expnumber != 'all':
print('invalid experiment number: ', expnumber)
return
if expnumber.isdecimal():
expnumber = expnumber.zfill(5)
experiments = glob.glob('*%s*.dat' % expnumber)
length = len(experiments)
print('Found', length, 'files for that experiment.')
if length == 0:
print('Oops. No experiment found.')
return
initial_deadtime = float(input('What is the initial deadtime used for this experiment? (in seconds)\n'))
initial_livetime = float(input('What is the initial livetime used for this experiment ? (in seconds)\n'))
# initial_deadtime = 0.02
times = []
for item, file in enumerate(experiments):
wholefile = open(file, 'r').read()
time = re.findall(r'q\[nm-1\]\stime=\d\d\d\d-\d\d-\d\dT\d\d:(\d\d):(\d\d\.\d\d\d\d\d\d)', wholefile)
minutes, seconds = float(time[0][0]), float(time[0][1])
if item == 0:
initial_minutes = minutes
initial_seconds = seconds
totalseconds = (minutes - initial_minutes) * 60 + (
seconds - initial_seconds) + initial_deadtime + initial_livetime
# print (file, str(totalseconds) + 's')
times.append(totalseconds)
steps = []
item = 0
print('----Times----')
for item, time in enumerate(times):
print('Frame:', item + 1, round(time * 1000, 4), 'ms')
if item != 0:
step = round(times[item] - times[item - 1], 4)
steps.append(step)
print(' Step = ', step)
do_file = input('Do you want to write these times to a file? (Y/n)\n')
if do_file == 'Y':
with open(expnumber + '.tim', 'w') as fhand:
for time, step in zip(times, steps):
fhand.write(str(round(time, 4) * 1000) + ' ' + str(round(step, 4) * 1000) + '\n')
fhand.write(str(round(times[-1], 4) * 1000))
do_graph = input('Do you want to graph the times of this experiment? (Y/(n)) \n')
if do_graph == 'Y':
plt.plot(times)
plt.show()
| gpl-3.0 |
ZhouJiaLinmumu/Grasp-and-lift-EEG-challenge | ensembling/WeightedMean.py | 4 | 3347 | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 15 14:12:12 2015.
@author: rc, alex
"""
import numpy as np
from collections import OrderedDict
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.metrics import roc_auc_score
from hyperopt import fmin, tpe, hp
from progressbar import Bar, ETA, Percentage, ProgressBar, RotatingMarker
class WeightedMeanClassifier(BaseEstimator, ClassifierMixin):
"""Weigted mean classifier with AUC optimization."""
def __init__(self, ensemble, step=0.025, max_evals=100, mean='arithmetic',
verbose=True):
"""Init."""
self.ensemble = ensemble
self.step = step
self.max_evals = max_evals
self.mean = mean
self.count = -1
self.verbose = verbose
self.param_space = OrderedDict()
for model in ensemble:
self.param_space[model] = hp.quniform(model, 0, 3, self.step)
# input data are arranged in a particular order, whereas hyperopt uses
# unordered lists when optimizing. The model has to keep track
# of the initial order so that correct weights are applied to columns
self.sorting = dict()
for i, m in enumerate(self.ensemble):
self.sorting[m] = i
def fit(self, X, y):
"""Fit."""
self.best_params = None
if self.mean != 'simple':
if self.verbose:
widgets = ['Training : ', Percentage(), ' ', Bar(marker=RotatingMarker()),
' ', ETA(), ' ']
self.pbar = ProgressBar(widgets=widgets, maxval=(self.max_evals * len(self.param_space)))
self.pbar.start()
objective = lambda w: -np.mean([roc_auc_score(y[:, col],
self.calcMean(X[:, col::6], w, training=True))
for col in range(6)])
self.best_params = fmin(objective, self.param_space, algo=tpe.suggest,
max_evals=self.max_evals)
if self.verbose:
print(self.best_params)
else:
self.best_params = None
def predict_proba(self, X):
"""Get predictions."""
return np.c_[[self.calcMean(X[:, col::6], self.best_params)
for col in range(6)]].transpose()
def calcMean(self, X, w, training = False):
"""Calculate Mean according to weights."""
self.count += 1
if self.verbose and self.count <= (self.max_evals * len(self.param_space)) and not self.count%10 and training:
self.pbar.update(self.count)
if self.mean == 'simple':
return np.sum(X, axis=1)/X.shape[1]
else:
w = [w[k] for k in sorted(self.sorting, key=self.sorting.get)]
if self.mean == 'arithmetic':
return np.sum(X * w, axis=1)/np.sum(w)
elif self.mean == 'geometric':
return np.exp(np.sum(np.log(X) * w, axis=1)/np.sum(w))
elif self.mean == 'power':
return 1/(1+np.exp(-np.sum(X ** w, axis=1)))
else:
                print('Mean should be either "simple", "arithmetic", "geometric" or "power"')
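# A minimal usage sketch (all names and shapes below are assumptions for
# illustration, not part of the original pipeline). X stacks the 6 event
# columns of each model side by side, so X[:, col::6] picks column `col`
# of every model; y holds the 6 binary targets.
#     wm = WeightedMeanClassifier(ensemble=['lvl1_a', 'lvl1_b'],
#                                 mean='arithmetic', max_evals=10,
#                                 verbose=False)
#     wm.fit(X_val, y_val)                 # finds one weight per model
#     probas = wm.predict_proba(X_test)    # shape (n_samples, 6)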
| bsd-3-clause |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/matplotlib/tri/tripcolor.py | 8 | 5868 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.collections import PolyCollection, TriMesh
from matplotlib.colors import Normalize
from matplotlib.tri.triangulation import Triangulation
import numpy as np
def tripcolor(ax, *args, **kwargs):
"""
Create a pseudocolor plot of an unstructured triangular grid.
The triangulation can be specified in one of two ways; either::
tripcolor(triangulation, ...)
where triangulation is a :class:`matplotlib.tri.Triangulation`
object, or
::
tripcolor(x, y, ...)
tripcolor(x, y, triangles, ...)
tripcolor(x, y, triangles=triangles, ...)
tripcolor(x, y, mask=mask, ...)
tripcolor(x, y, triangles, mask=mask, ...)
in which case a Triangulation object will be created. See
    :class:`~matplotlib.tri.Triangulation` for an explanation of these
possibilities.
The next argument must be *C*, the array of color values, either
one per point in the triangulation if color values are defined at
points, or one per triangle in the triangulation if color values
are defined at triangles. If there are the same number of points
and triangles in the triangulation it is assumed that color
values are defined at points; to force the use of color values at
triangles use the kwarg *facecolors*=C instead of just *C*.
*shading* may be 'flat' (the default) or 'gouraud'. If *shading*
is 'flat' and C values are defined at points, the color values
used for each triangle are from the mean C of the triangle's
three points. If *shading* is 'gouraud' then color values must be
defined at points.
The remaining kwargs are the same as for
:meth:`~matplotlib.axes.Axes.pcolor`.
**Example:**
.. plot:: mpl_examples/pylab_examples/tripcolor_demo.py
"""
if not ax._hold:
ax.cla()
alpha = kwargs.pop('alpha', 1.0)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
shading = kwargs.pop('shading', 'flat')
facecolors = kwargs.pop('facecolors', None)
if shading not in ['flat', 'gouraud']:
raise ValueError("shading must be one of ['flat', 'gouraud'] "
"not {0}".format(shading))
tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)
# C is the colors array defined at either points or faces (i.e. triangles).
# If facecolors is None, C are defined at points.
# If facecolors is not None, C are defined at faces.
if facecolors is not None:
C = facecolors
else:
C = np.asarray(args[0])
# If there are a different number of points and triangles in the
# triangulation, can omit facecolors kwarg as it is obvious from
# length of C whether it refers to points or faces.
# Do not do this for gouraud shading.
if (facecolors is None and len(C) == len(tri.triangles) and
len(C) != len(tri.x) and shading != 'gouraud'):
facecolors = C
# Check length of C is OK.
if ((facecolors is None and len(C) != len(tri.x)) or
(facecolors is not None and len(C) != len(tri.triangles))):
raise ValueError('Length of color values array must be the same '
'as either the number of triangulation points '
'or triangles')
# Handling of linewidths, shading, edgecolors and antialiased as
# in Axes.pcolor
linewidths = (0.25,)
if 'linewidth' in kwargs:
kwargs['linewidths'] = kwargs.pop('linewidth')
kwargs.setdefault('linewidths', linewidths)
edgecolors = 'none'
if 'edgecolor' in kwargs:
kwargs['edgecolors'] = kwargs.pop('edgecolor')
ec = kwargs.setdefault('edgecolors', edgecolors)
if 'antialiased' in kwargs:
kwargs['antialiaseds'] = kwargs.pop('antialiased')
if 'antialiaseds' not in kwargs and ec.lower() == "none":
kwargs['antialiaseds'] = False
if shading == 'gouraud':
if facecolors is not None:
raise ValueError('Gouraud shading does not support the use '
'of facecolors kwarg')
if len(C) != len(tri.x):
raise ValueError('For gouraud shading, the length of color '
'values array must be the same as the '
'number of triangulation points')
collection = TriMesh(tri, **kwargs)
else:
# Vertices of triangles.
maskedTris = tri.get_masked_triangles()
verts = np.concatenate((tri.x[maskedTris][..., np.newaxis],
tri.y[maskedTris][..., np.newaxis]), axis=2)
# Color values.
if facecolors is None:
# One color per triangle, the mean of the 3 vertex color values.
C = C[maskedTris].mean(axis=1)
elif tri.mask is not None:
# Remove color values of masked triangles.
C = C.compress(1-tri.mask)
collection = PolyCollection(verts, **kwargs)
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None:
if not isinstance(norm, Normalize):
msg = "'norm' must be an instance of 'Normalize'"
raise ValueError(msg)
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
ax.grid(False)
minx = tri.x.min()
maxx = tri.x.max()
miny = tri.y.min()
maxy = tri.y.max()
corners = (minx, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
ax.add_collection(collection)
return collection
| mit |
coder-james/mxnet | example/svm_mnist/svm_mnist.py | 7 | 3545 |
#############################################################
## Please read the README.md document for better reference ##
#############################################################
from __future__ import print_function
import mxnet as mx
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.decomposition import PCA
# import matplotlib.pyplot as plt
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# Network declaration as symbols. The following pattern was based
# on the article, but feel free to play with the number of nodes
# and with the activation function
data = mx.symbol.Variable('data')
fc1 = mx.symbol.FullyConnected(data = data, name='fc1', num_hidden=512)
act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type="relu")
fc2 = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 512)
act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type="relu")
fc3 = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=10)
# Here we add the ultimate layer based on L2-SVM objective
mlp = mx.symbol.SVMOutput(data=fc3, name='svm')
# To use L1-SVM objective, comment the line above and uncomment the line below
# mlp = mx.symbol.SVMOutput(data=fc3, name='svm', use_linear=True)
# Now we fetch the MNIST dataset, add some noise, as the article suggests,
# permute and assign the examples to be used on our network
mnist = fetch_mldata('MNIST original')
mnist_pca = PCA(n_components=70).fit_transform(mnist.data)
noise = np.random.normal(size=mnist_pca.shape)
mnist_pca += noise
np.random.seed(1234) # set seed for deterministic ordering
p = np.random.permutation(mnist_pca.shape[0])
X = mnist_pca[p]
Y = mnist.target[p]
X_show = mnist.data[p]
# This is just to normalize the input to a value inside [0,1],
# and separate train set and test set
X = X.astype(np.float32)/255
X_train = X[:60000]
X_test = X[60000:]
X_show = X_show[60000:]
Y_train = Y[:60000]
Y_test = Y[60000:]
# Article's suggestion on batch size
batch_size = 200
train_iter = mx.io.NDArrayIter(X_train, Y_train, batch_size=batch_size)
test_iter = mx.io.NDArrayIter(X_test, Y_test, batch_size=batch_size)
# A quick workaround to prevent mxnet from complaining about the lack of a softmax_label
train_iter.label = mx.io._init_data(Y_train, allow_empty=True, default_name='svm_label')
test_iter.label = mx.io._init_data(Y_test, allow_empty=True, default_name='svm_label')
# Here we instantiate and fit the model for our data
# The article actually suggests using 400 epochs,
# but I reduced it to 10 for convenience
mod = mx.mod.Module(
context = mx.cpu(0), # Run on CPU 0
symbol = mlp, # Use the network we just defined
label_names = ['svm_label'],
)
mod.fit(
train_data=train_iter,
eval_data=test_iter, # Testing data set. MXNet computes scores on test set every epoch
batch_end_callback = mx.callback.Speedometer(batch_size, 200), # Logging module to print out progress
num_epoch = 10, # Train for 10 epochs
optimizer_params = {
'learning_rate': 0.1, # Learning rate
'momentum': 0.9, # Momentum for SGD with momentum
'wd': 0.00001, # Weight decay for regularization
},
)
# Uncomment to view an example
# plt.imshow((X_show[0].reshape((28,28))*255).astype(np.uint8), cmap='Greys_r')
# plt.show()
# print 'Result:', model.predict(X_test[0:1])[0].argmax()
# Now it prints how good did the network did for this configuration
print('Accuracy:', mod.score(test_iter, mx.metric.Accuracy())[0][1]*100, '%')
| apache-2.0 |
VDBWRAIR/bioframes | tests/testsamframe.py | 1 | 4444 | #from bioframes import samframe
#import mock
#import pandas as pd
#from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_index_equal
#from numpy.testing import assert_array_equal, assert_array_almost_equal
#from operator import itemgetter, attrgetter as attr
#from testbiopandas import mock_file
#import unittest
#
##class TestClassicSam(unittest.TestCase):
## def setUp(self):
## self.samtext = '\n'.join(['read1\t1\tchr1\t1\t60 10M = 1 1 TTTCGAATC FFFFFFFFF NM:i:3 AS:i:231 XS:i:0 RG:Z:MiSeq',
## 'read2 1 chr2 1 60 10M = 1 1 CTTCGATC AFFDDDDD NM:i:3 AS:i:231 XS:i:0 RG:Z:MiSeq',
## 'read3 1 chr1 1 60 10M = 1 1 CCGATCAA FF@@@F@F NM:i:3 AS:i:231 XS:i:0 RG:Z:MiSeq'])
## def test_sam_to_df(self):
## result = pc.samview_to_df(self.samtext)
## self.assertEquals(result.columns.tolist(), self.columns)
## self.assertEquals(result.ix[2]['QNAME'], 'read3')
##
## def test_df_from_collection_attributes(self):
## mocks = [mock.Mock() for i in range(5)]
## [[mock_do() for i in range(index)] for index, mock_do in enumerate(mocks)]
## columns = ['call_count', 'called']
## expected = pd.DataFrame( [(i, bool(i)) for i in range(5)])
## expected.columns = columns
## result = bf.df_from_collection_attributes(columns, mocks)
## assert_frame_equal(expected, result)
##
#
#class TestSamframe(unittest.TestCase):
# def setUp(self):
# #TODO: fix this so data is accurate; i.e.:
# # ''' assert sum(itemgetter('M', 'I', 'S', '=', 'X')) == len(seq) == len(quality), \ "cigar string M/I/S/=/X should sum to the length of the query sequence." '''
#
#
# self.samtext='\n'.join([
# 'read1 60 chr1 1 1 10M = 1 1 TTTCGAATC FFFFFFFFF NM:i:3 AS:i:231 XS:i:0 RG:Z:MiSeq',
# 'read2 8 chr2 1 1 3I3M4D = 1 1 CTTCGATC AFFDDDDD NM:i:3 AS:i:2 XS:i:0 RG:Z:Sanger',
# 'read3 60 chr1 1 1 2D2M2I2M2= = 1 1 CCGATCAA FF@@@F@F NM:i:3 AS:i:231 XS:i:0 RG:Z:MiSeq'])
#
# self.result = mock_file(samframe.load_sam, self.samtext)
#
# def test_cigar_scores(self):
# #TODO: with mock_open
# e_strings = pd.Series(['10M', '3I3M4D', '2D2M2I2M2='])
# e_m, e_i, e_d, e_total = pd.Series([10, 3, 4]), pd.Series([float('nan'), 3, 2]), pd.Series([float('nan'), 4, 2]), pd.Series([0, 7, 4])
# assert_series_equal(self.result.cigar_I, e_i)
# assert_series_equal(self.result.cigar_M, e_m)
# assert_series_equal(self.result.cigar_D, e_d)
# assert_series_equal(self.result.cigar_score, e_total)
# assert_series_equal(self.result.CIGAR, e_strings)
#
# def test_options(self):
# e_nm, e_as, e_xs, e_rg = map(pd.Series, [[3, 3, 3], [231, 2, 231], [0, 0, 0], ['MiSeq', 'Sanger', 'MiSeq']])
# df = self.result
# results = [df.NM, df.AS, df.XS, df.RG]
# map(assert_series_equal, [e_nm, e_as, e_xs, e_rg], results)
#
# def test_df_with_options(self):
# #df = self.result.set_index( ['QNAME', 'POS', 'RNAME'])
# sam_columns = ["QNAME", "FLAG", "RNAME", "POS", "MAPQ", "CIGAR", "RNEXT", "PNEXT", "TLEN", "SEQ", "QUAL"] + ['NM', 'AS', 'XS', 'RG']
# expected = pd.Series(['read3', 60, 'chr1', 1,1, '2D2M2I2M2=', '=', 1, 1, 'CCGATCAA', 'FF@@@F@F', 3, 231, 0, 'MiSeq']).values#, 4, 2, 2, 4)
# res = self.result.ix[2][sam_columns].values
# assert_array_equal(expected, res)
# #assert_series_equal(df.ix['read3', 1, 'chr1'][:len(expected)], expected)
#
# def test_flags(self):
# df = self.result
# middle_e = pd.Series([False, False, False, True], dtype=object).values
# outer_e = pd.Series([False, False, True, True], dtype=object).values
# names = ["template having multiple segments in sequencing",
# "each segment properly aligned according to the aligner",
# "segment unmapped",
# "next segment in the template unmapped"]
## inner_actual = df[df['QNAME'] == 'read2'][names]
## outer_actual1= df[df['QNAME'] == 'read1'][names]
## outer_actual3= df[df['QNAME'] == 'read3'][names]
# flag8, flag60_1, flag60_2 = map(attr('values'), map(itemgetter(names), itemgetter(1, 0, 2)(df.ix)))
# assert_array_equal(middle_e,flag8)
# assert_array_equal(outer_e, flag60_1)
# assert_array_equal(outer_e, flag60_2)
| gpl-2.0 |
jfinkels/networkx | examples/drawing/chess_masters.py | 34 | 5104 | #!/usr/bin/env python
"""
An example of the MultiDiGraph class
The function chess_pgn_graph reads a collection of chess
matches stored in the specified PGN file
(PGN ="Portable Game Notation")
Here the (compressed) default file ---
chess_masters_WCC.pgn.bz2 ---
contains all 685 World Chess Championship matches
from 1886 - 1985.
(data from http://chessproblem.my-free-games.com/chess/games/Download-PGN.php)
The chess_pgn_graph() function returns a MultiDiGraph
with multiple edges. Each node is
the last name of a chess master. Each edge is directed
from white to black and contains selected game info.
The key statement in chess_pgn_graph below is
G.add_edge(white, black, game_info)
where game_info is a dict describing each game.
"""
# Copyright (C) 2006-2016 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
# tag names specifying what game info should be
# stored in the dict on each digraph edge
game_details=["Event",
"Date",
"Result",
"ECO",
"Site"]
def chess_pgn_graph(pgn_file="chess_masters_WCC.pgn.bz2"):
"""Read chess games in pgn format in pgn_file.
Filenames ending in .gz or .bz2 will be uncompressed.
Return the MultiDiGraph of players connected by a chess game.
Edges contain game data in a dict.
"""
import bz2
G=nx.MultiDiGraph()
game={}
datafile = bz2.BZ2File(pgn_file)
lines = (line.decode().rstrip('\r\n') for line in datafile)
for line in lines:
if line.startswith('['):
tag,value=line[1:-1].split(' ',1)
game[str(tag)]=value.strip('"')
else:
# empty line after tag set indicates
# we finished reading game info
if game:
white=game.pop('White')
black=game.pop('Black')
G.add_edge(white, black, **game)
game={}
return G
if __name__ == '__main__':
G=chess_pgn_graph()
ngames=G.number_of_edges()
nplayers=G.number_of_nodes()
print("Loaded %d chess games between %d players\n"\
% (ngames,nplayers))
# identify connected components
# of the undirected version
Gcc=list(nx.connected_component_subgraphs(G.to_undirected()))
if len(Gcc)>1:
print("Note the disconnected component consisting of:")
print(Gcc[1].nodes())
# find all games with B97 opening (as described in ECO)
openings=set([game_info['ECO']
for (white,black,game_info) in G.edges(data=True)])
print("\nFrom a total of %d different openings,"%len(openings))
print('the following games used the Sicilian opening')
print('with the Najdorff 7...Qb6 "Poisoned Pawn" variation.\n')
for (white,black,game_info) in G.edges(data=True):
if game_info['ECO']=='B97':
print(white,"vs",black)
for k,v in game_info.items():
print(" ",k,": ",v)
print("\n")
try:
import matplotlib.pyplot as plt
except ImportError:
import sys
print("Matplotlib needed for drawing. Skipping")
sys.exit(0)
# make new undirected graph H without multi-edges
H=nx.Graph(G)
# edge width is proportional number of games played
edgewidth=[]
for (u,v,d) in H.edges(data=True):
edgewidth.append(len(G.get_edge_data(u,v)))
# node size is proportional to number of games won
wins=dict.fromkeys(G.nodes(),0.0)
for (u,v,d) in G.edges(data=True):
r=d['Result'].split('-')
if r[0]=='1':
wins[u]+=1.0
elif r[0]=='1/2':
wins[u]+=0.5
wins[v]+=0.5
else:
wins[v]+=1.0
try:
pos=nx.nx_agraph.graphviz_layout(H)
except:
pos=nx.spring_layout(H,iterations=20)
plt.rcParams['text.usetex'] = False
plt.figure(figsize=(8,8))
nx.draw_networkx_edges(H,pos,alpha=0.3,width=edgewidth, edge_color='m')
nodesize=[wins[v]*50 for v in H]
nx.draw_networkx_nodes(H,pos,node_size=nodesize,node_color='w',alpha=0.4)
nx.draw_networkx_edges(H,pos,alpha=0.4,node_size=0,width=1,edge_color='k')
nx.draw_networkx_labels(H,pos,fontsize=14)
font = {'fontname' : 'Helvetica',
'color' : 'k',
'fontweight' : 'bold',
'fontsize' : 14}
plt.title("World Chess Championship Games: 1886 - 1985", font)
# change font and write text (using data coordinates)
font = {'fontname' : 'Helvetica',
'color' : 'r',
'fontweight' : 'bold',
'fontsize' : 14}
plt.text(0.5, 0.97, "edge width = # games played",
horizontalalignment='center',
transform=plt.gca().transAxes)
plt.text(0.5, 0.94, "node size = # games won",
horizontalalignment='center',
transform=plt.gca().transAxes)
plt.axis('off')
plt.savefig("chess_masters.png",dpi=75)
print("Wrote chess_masters.png")
plt.show() # display
| bsd-3-clause |
fyffyt/scikit-learn | examples/cluster/plot_agglomerative_clustering.py | 343 | 2931 | """
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of 20 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph, imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters to the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(X, 30, include_self=False)
for connectivity in (None, knn_graph):
for n_clusters in (30, 3):
plt.figure(figsize=(10, 4))
for index, linkage in enumerate(('average', 'complete', 'ward')):
plt.subplot(1, 3, index + 1)
model = AgglomerativeClustering(linkage=linkage,
connectivity=connectivity,
n_clusters=n_clusters)
t0 = time.time()
model.fit(X)
elapsed_time = time.time() - t0
plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
cmap=plt.cm.spectral)
plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
fontdict=dict(verticalalignment='top'))
plt.axis('equal')
plt.axis('off')
plt.subplots_adjust(bottom=0, top=.89, wspace=0,
left=0, right=1)
plt.suptitle('n_cluster=%i, connectivity=%r' %
(n_clusters, connectivity is not None), size=17)
plt.show()
| bsd-3-clause |
Garrett-R/scikit-learn | sklearn/metrics/cluster/tests/test_supervised.py | 44 | 7663 | import numpy as np
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import entropy
from sklearn.utils.testing import assert_raise_message
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from numpy.testing import assert_array_almost_equal
score_funcs = [
adjusted_rand_score,
homogeneity_score,
completeness_score,
v_measure_score,
adjusted_mutual_info_score,
normalized_mutual_info_score,
]
def test_error_messages_on_wrong_input():
for score_func in score_funcs:
expected = ('labels_true and labels_pred must have same size,'
' got 2 and 3')
assert_raise_message(ValueError, expected, score_func,
[0, 1], [1, 1, 1])
expected = "labels_true must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[[0, 1], [1, 0]], [1, 1, 1])
expected = "labels_pred must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[0, 1, 0], [[1, 1], [0, 0]])
def test_perfect_matches():
for score_func in score_funcs:
assert_equal(score_func([], []), 1.0)
assert_equal(score_func([0], [1]), 1.0)
assert_equal(score_func([0, 0, 0], [0, 0, 0]), 1.0)
assert_equal(score_func([0, 1, 0], [42, 7, 42]), 1.0)
assert_equal(score_func([0., 1., 0.], [42., 7., 42.]), 1.0)
assert_equal(score_func([0., 1., 2.], [42., 7., 2.]), 1.0)
assert_equal(score_func([0, 1, 2], [42, 7, 2]), 1.0)
def test_homogeneous_but_not_complete_labeling():
# homogeneous but not complete clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 2, 2])
assert_almost_equal(h, 1.00, 2)
assert_almost_equal(c, 0.69, 2)
assert_almost_equal(v, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
# complete but not homogeneous clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 1, 1])
assert_almost_equal(h, 0.58, 2)
assert_almost_equal(c, 1.00, 2)
assert_almost_equal(v, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
# neither complete nor homogeneous but not so bad either
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
def test_non_consecutive_labels():
# regression tests for labels with gaps
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 2, 2, 2],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 4, 0, 4, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
assert_almost_equal(ari_1, 0.24, 2)
assert_almost_equal(ari_2, 0.24, 2)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
seed=42):
"""Compute score for random uniform cluster labelings"""
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(k_range), n_runs))
for i, k in enumerate(k_range):
for j in range(n_runs):
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
def test_adjustment_for_chance():
"""Check that adjusted scores are almost zero on random labels"""
n_clusters_range = [2, 10, 50, 90]
n_samples = 100
n_runs = 10
scores = uniform_labelings_scores(
adjusted_rand_score, n_samples, n_clusters_range, n_runs)
max_abs_scores = np.abs(scores).max(axis=1)
assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
def test_adjusted_mutual_info_score():
"""Compute the Adjusted Mutual Information and test against known values"""
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
# Mutual information
mi = mutual_info_score(labels_a, labels_b)
assert_almost_equal(mi, 0.41022, 5)
# Expected mutual information
C = contingency_matrix(labels_a, labels_b)
n_samples = np.sum(C)
emi = expected_mutual_information(C, n_samples)
assert_almost_equal(emi, 0.15042, 5)
# Adjusted mutual information
ami = adjusted_mutual_info_score(labels_a, labels_b)
assert_almost_equal(ami, 0.27502, 5)
ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
assert_equal(ami, 1.0)
# Test with a very large array
a110 = np.array([list(labels_a) * 110]).flatten()
b110 = np.array([list(labels_b) * 110]).flatten()
ami = adjusted_mutual_info_score(a110, b110)
# This is not accurate to more than 2 places
assert_almost_equal(ami, 0.37, 2)
def test_entropy():
ent = entropy([0, 0, 42.])
assert_almost_equal(ent, 0.6365141, 5)
assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
C = contingency_matrix(labels_a, labels_b)
C2 = np.histogram2d(labels_a, labels_b,
bins=(np.arange(1, 5),
np.arange(1, 5)))[0]
assert_array_almost_equal(C, C2)
C = contingency_matrix(labels_a, labels_b, eps=.1)
assert_array_almost_equal(C, C2 + .1)
def test_exactly_zero_info_score():
"""Check numerical stability when information is exactly zero"""
for i in np.logspace(1, 4, 4).astype(np.int):
labels_a, labels_b = np.ones(i, dtype=np.int),\
np.arange(i, dtype=np.int)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(v_measure_score(labels_a, labels_b), 0.0)
assert_equal(adjusted_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
def test_v_measure_and_mutual_information(seed=36):
"""Check relation between v_measure, entropy and mutual information"""
for i in np.logspace(1, 4, 4).astype(np.int):
random_state = np.random.RandomState(seed)
labels_a, labels_b = random_state.random_integers(0, 10, i),\
random_state.random_integers(0, 10, i)
assert_almost_equal(v_measure_score(labels_a, labels_b),
2.0 * mutual_info_score(labels_a, labels_b) /
(entropy(labels_a) + entropy(labels_b)), 0)
| bsd-3-clause |
bthirion/nipy | examples/algorithms/ward_clustering.py | 3 | 2047 | #!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import print_function # Python 2/3 compatibility
__doc__ = """
Demo ward clustering on a graph: various ways of forming clusters and dendrogram
Requires matplotlib
"""
print(__doc__)
import numpy as np
from numpy.random import randn, rand
try:
import matplotlib.pyplot as plt
except ImportError:
raise RuntimeError("This script needs the matplotlib library")
from nipy.algorithms.graph import knn
from nipy.algorithms.clustering.hierarchical_clustering import ward
# n = number of points, k = number of nearest neighbours
n = 100
k = 5
# Set verbose to True to see more printed output
verbose = False
X = randn(n, 2)
X[:int(np.ceil(n / 3))] += 3
G = knn(X, 5)
tree = ward(G, X, verbose)
threshold = .5 * n
u = tree.partition(threshold)
plt.figure(figsize=(12, 6))
plt.subplot(1, 3, 1)
for i in range(u.max()+1):
plt.plot(X[u == i, 0], X[u == i, 1], 'o', color=(rand(), rand(), rand()))
plt.axis('tight')
plt.axis('off')
plt.title('clustering into clusters \n of inertia < %g' % threshold)
u = tree.split(k)
plt.subplot(1, 3, 2)
for e in range(G.E):
plt.plot([X[G.edges[e, 0], 0], X[G.edges[e, 1], 0]],
[X[G.edges[e, 0], 1], X[G.edges[e, 1], 1]], 'k')
for i in range(u.max() + 1):
plt.plot(X[u == i, 0], X[u == i, 1], 'o', color=(rand(), rand(), rand()))
plt.axis('tight')
plt.axis('off')
plt.title('clustering into 5 clusters')
nl = np.sum(tree.isleaf())
validleaves = np.zeros(n)
validleaves[:int(np.ceil(n / 4))] = 1
valid = np.zeros(tree.V, 'bool')
valid[tree.isleaf()] = validleaves.astype('bool')
nv = np.sum(validleaves)
nv0 = 0
while nv > nv0:
nv0 = nv
for v in range(tree.V):
if valid[v]:
valid[tree.parents[v]]=1
nv = np.sum(valid)
ax = plt.subplot(1, 3, 3)
ax = tree.plot(ax)
ax.set_title('Dendrogram')
ax.set_visible(True)
plt.show()
if verbose:
print('List of sub trees')
print(tree.list_of_subtrees())
| bsd-3-clause |
lmjohns3/cube-experiment | plots/utils.py | 1 | 2493 | import numpy as np
import matplotlib.colors
COLORS = {
'marker00-r-head-back': '#9467bd',
'marker01-r-head-front': '#9467bd',
'marker02-l-head-front': '#9467bd',
'marker03-l-head-back': '#9467bd',
'marker07-r-shoulder': '#111111',
'marker13-r-fing-index': '#111111',
'marker14-r-mc-outer': '#111111',
'marker19-l-shoulder': '#111111',
'marker25-l-fing-index': '#111111',
'marker26-l-mc-outer': '#111111',
'marker31-sternum': '#111111',
'marker34-l-ilium': '#2ca02c',
'marker35-r-ilium': '#2ca02c',
'marker36-r-hip': '#2ca02c',
'marker40-r-heel': '#1f77b4',
'marker41-r-mt-outer': '#1f77b4',
'marker42-r-mt-inner': '#1f77b4',
'marker43-l-hip': '#2ca02c',
'marker47-l-heel': '#d62728',
'marker48-l-mt-outer': '#d62728',
'marker49-l-mt-inner': '#d62728',
}
RCM = matplotlib.colors.LinearSegmentedColormap('b', dict(
red= ((0, 0.8, 0.8), (1, 0.8, 0.8)),
green=((0, 0.1, 0.1), (1, 0.1, 0.1)),
blue= ((0, 0.1, 0.1), (1, 0.1, 0.1)),
alpha=((0, 1.0, 1.0), (1, 0.0, 0.0)),
))
GCM = matplotlib.colors.LinearSegmentedColormap('b', dict(
red= ((0, 0.1, 0.1), (1, 0.1, 0.1)),
green=((0, 0.6, 0.6), (1, 0.6, 0.6)),
blue= ((0, 0.1, 0.1), (1, 0.1, 0.1)),
alpha=((0, 1.0, 1.0), (1, 0.0, 0.0)),
))
BCM = matplotlib.colors.LinearSegmentedColormap('r', dict(
red= ((0, 0.1, 0.1), (1, 0.1, 0.1)),
green=((0, 0.5, 0.5), (1, 0.5, 0.5)),
blue= ((0, 0.7, 0.7), (1, 0.7, 0.7)),
alpha=((0, 1.0, 1.0), (1, 0.0, 0.0)),
))
OCM = matplotlib.colors.LinearSegmentedColormap('r', dict(
red= ((0, 1.0, 1.0), (1, 1.0, 1.0)),
green=((0, 0.5, 0.5), (1, 0.5, 0.5)),
blue= ((0, 0.0, 0.0), (1, 0.0, 0.0)),
alpha=((0, 1.0, 1.0), (1, 0.0, 0.0)),
))
PCM = matplotlib.colors.LinearSegmentedColormap('r', dict(
red= ((0, 0.6, 0.6), (1, 0.6, 0.6)),
green=((0, 0.4, 0.4), (1, 0.4, 0.4)),
blue= ((0, 0.7, 0.7), (1, 0.7, 0.7)),
alpha=((0, 1.0, 1.0), (1, 0.0, 0.0)),
))
# fewf, http://stackoverflow.com/questions/4494404
def contig(cond):
'''Return (start, end) indices for contiguous blocks where cond is True.'''
idx = np.diff(cond).nonzero()[0] + 1
if cond[0]:
idx = np.r_[0, idx]
if cond[-1]:
idx = np.r_[idx, cond.size]
return idx.reshape((-1, 2))
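# Illustrative example (an assumption, not from the original module; it relies
# on numpy.diff accepting a boolean array): for
# cond = numpy.array([False, True, True, False, True]) this returns
# array([[1, 3], [4, 5]]), i.e. half-open [start, end) index pairs.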
def local_minima(a):
'''Return indexes of local minima in a.'''
minima = np.r_[True, a[1:] < a[:-1]] & np.r_[a[:-1] < a[1:], True]
return minima.nonzero()[0]
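# Illustrative example (an assumption, not from the original module):
# local_minima(numpy.array([3, 1, 2, 0, 5])) -> array([1, 3]); an endpoint only
# counts when it is below its single neighbour.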
| mit |
davidwhogg/DeprojectAllGalaxies | scripts/astrohack_projections.py | 1 | 15510 |
import numpy
import scipy.stats
from scipy.stats import multivariate_normal
from scipy.linalg import orth
import matplotlib.pyplot as plt
from math import pi ,sin, cos
import h5py
from scipy.ndimage.filters import gaussian_filter
class rotation_3d(object):
"""
the class allows one to rotate a 3D vector in different directions
"""
def __init__(self):
self.rot_mat_x = numpy.eye(3)
self.rot_mat_y = numpy.eye(3)
self.rot_mat_z = numpy.eye(3)
def _calc_rotation_matrix_x(self, theta, units='deg'):
assert units=='deg' or units=='rad'
if units=='deg':
theta_rad = theta * pi / 180.0
self.rot_mat_x = numpy.array((1, 0, 0, 0, cos(theta_rad), -sin(theta_rad), 0, sin(theta_rad), cos(theta_rad))).reshape((3, 3))
def _calc_rotation_matrix_y(self, theta, units='deg'):
assert units=='deg' or units=='rad'
if units=='deg':
theta_rad = theta * pi / 180.0
self.rot_mat_y = numpy.array((cos(theta_rad), 0, sin(theta_rad), 0, 1, 0, -sin(theta_rad), 0, cos(theta_rad))).reshape((3, 3))
def _calc_rotation_matrix_z(self, theta, units='deg'):
assert units=='deg' or units=='rad'
if units=='deg':
theta_rad = theta * pi / 180.0
self.rot_mat_z = numpy.array((cos(theta_rad), -sin(theta_rad), 0, sin(theta_rad), cos(theta_rad), 0, 0, 0, 1)).reshape((3, 3))
def _calc_rotation_matrix(self):
self.rot_mat = numpy.dot(numpy.dot(self.rot_mat_x, self.rot_mat_y), self.rot_mat_z)
def return_rotation_matrix(self, theta_x, theta_y, theta_z, units='deg'):
"""
function rotates a vector in 3D with three given angles
"""
assert units=='deg' or units=='rad'
self._calc_rotation_matrix_x(theta_x, units)
self._calc_rotation_matrix_y(theta_y, units)
self._calc_rotation_matrix_z(theta_z, units)
self._calc_rotation_matrix()
return self.rot_mat
def rotate_vector(self, theta_x, theta_y, theta_z, vector, units='deg'):
"""
function rotates a vector in 3D with three given angles
"""
assert units=='deg' or units=='rad'
assert vector.shape == (3, )
self._calc_rotation_matrix_x(theta_x, units)
self._calc_rotation_matrix_y(theta_y, units)
self._calc_rotation_matrix_z(theta_z, units)
self._calc_rotation_matrix()
return numpy.dot(vector, self.rot_mat)
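# Illustrative check (an assumption, not part of the original module): because
# rotate_vector left-multiplies the row vector (numpy.dot(vector, rot_mat)), a
# +90 degree rotation about z maps the x axis to roughly (0, -1, 0):
#     r = rotation_3d()
#     r.rotate_vector(0., 0., 90., numpy.array([1., 0., 0.]))  # ~ [0., -1., 0.]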
class mixture_of_gaussians(object):
"""
the class represents a D dimensional galaxy model which is constructed from Gaussians
"""
def __init__(self, D):
self.alphas = []
self.mus = []
self.fis = []
self.K = 0
self.D = D
def copy(self):
"""
This code is brittle because we are not using proper setters (or adders) to construct the mixture.
"""
new = mixture_of_gaussians(self.D)
for alpha, mu, fi in zip(self.alphas, self.mus, self.fis):
new.add_gaussian(alpha, mu.copy(), fi.copy())
return new
def __mul__(self, factor):
new = self.copy()
for k, alpha in enumerate(self.alphas):
new.alphas[k] = alpha * factor
return new
def rescale(self, scale):
"""
Expand everything by isotropic scale.
Hacky and brittle!
Returns a copy!
"""
new = self.copy()
new.mus = [scale * mu for mu in self.mus]
new.fis = [scale * scale * fi for fi in self.fis]
return new
def add_gaussian(self, alpha, mu, fi):
assert mu.shape == (self.D,)
assert fi.shape == (self.D, self.D)
self.alphas.append(alpha)
self.mus.append(mu)
self.fis.append(fi)
self.K += 1
def convolve(self, other):
"""
Convolve a mixture with another mixture.
Might really be *correlate* rather than convolve!
Returns a new object; doesn't work in place.
"""
assert self.D == other.D
new = mixture_of_gaussians(self.D)
for ks in range(self.K):
for ko in range(other.K):
new.add_gaussian(self.alphas[ks] * other.alphas[ko],
self.mus[ks] + other.mus[ko],
self.fis[ks] + other.fis[ko])
return new
def render(self, positions):
N, D = positions.shape
assert D == self.D
densities= numpy.zeros(N)
for k in range(self.K):
gaus_k = multivariate_normal(mean=self.mus[k], cov=self.fis[k])
pdf_k = gaus_k.pdf(positions)
densities += self.alphas[k] * pdf_k
return densities
def get_total_mass(self):
return numpy.sum(self.alphas)
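# A minimal sketch (assumed values, for illustration only): a single isotropic
# 2-D Gaussian of total mass 2 evaluated at its mean gives 2 / (2 * pi) ~ 0.318:
#     m = mixture_of_gaussians(2)
#     m.add_gaussian(2.0, numpy.zeros(2), numpy.eye(2))
#     m.render(numpy.zeros((1, 2)))   # ~ array([0.318])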
class galaxy_model_3d(mixture_of_gaussians):
"""
the class represents a 3D dimensional galaxy model which is constructed from Gaussians
"""
def __init__(self):
super(galaxy_model_3d, self).__init__(3)
def copy(self):
"""
This code is brittle because we are not using proper setters (or adders) to construct the mixture.
"""
new = galaxy_model_3d()
for alpha, mu, fi in zip(self.alphas, self.mus, self.fis):
new.add_gaussian(alpha, mu.copy(), fi.copy())
return new
def project_2d(self, xi_hat, eta_hat):
assert xi_hat.shape == (self.D,)
assert eta_hat.shape == (self.D,)
assert numpy.isclose(numpy.dot(xi_hat, xi_hat), 1.0)
assert numpy.isclose(numpy.dot(eta_hat, eta_hat), 1.0)
assert numpy.isclose(numpy.dot(xi_hat, eta_hat), 0.0)
projection_matrix = numpy.vstack((xi_hat, eta_hat))
mixture_2d = mixture_of_gaussians(2)
for k in range(self.K):
m = numpy.dot(projection_matrix, self.mus[k])
V = numpy.dot(numpy.dot(projection_matrix, self.fis[k]), projection_matrix.T)
mixture_2d.add_gaussian(self.alphas[k], m, V)
return mixture_2d
def render_2d_image(self, xi_hat, eta_hat, xs, ys, intensity=1., psf=None):
Y, X = numpy.meshgrid(ys, xs)
xs_flatten = X.flatten()
ys_flatten = Y.flatten()
positions_flatten = numpy.vstack((xs_flatten, ys_flatten)).T
mixture_2d = self.project_2d(xi_hat, eta_hat) * intensity
if psf is not None:
mixture_2d = mixture_2d.convolve(psf)
densities_flatten = mixture_2d.render(positions_flatten)
densities = numpy.reshape(densities_flatten, X.shape)
return densities
def _construct_covariance_from_vector(self, vector):
assert len(vector)==6
assert numpy.isreal(vector).all()
fi = numpy.zeros((3,3))
fi[numpy.diag_indices(3)] = vector[:3]
fi[numpy.tril_indices(3, -1)] += vector[3:]
covariance = numpy.dot(fi, fi.T)
return covariance
def set_parameters_from_vector(self, vector):
assert len(vector) % 10 == 0
self.__init__()
for i in xrange(0, len(vector), 10):
parameters = vector[i:i+10]
alpha = parameters[0]
mu = parameters[1:4]
fi = self._construct_covariance_from_vector(parameters[4:])
## old covariance construction ##
#fi = numpy.zeros((3,3))
#fi[numpy.diag_indices(3)] = parameters[4:7]
#fi[numpy.triu_indices(3, 1)] += parameters[7:10]
#fi[numpy.tril_indices(3, -1)] += parameters[7:10]
self.add_gaussian(alpha, mu, fi)
def get_parameters_vector(self):
vector = numpy.zeros(10 * self.K)
for k in range(self.K):
i = 10 * k
vector[i] = self.alphas[k]
vector[i+1:i+4] = self.mus[k]
vector[i+4:i+7] = (self.fis[k])[numpy.diag_indices(3)]
vector[i+7:i+10] = (self.fis[k])[numpy.triu_indices(3, 1)]
return vector
def get_ln_prior(self):
"""
Penalize bad (or impossible) condition numbers.
"""
lnp = 0.
for fi in self.fis:
try:
eigs = numpy.linalg.eigvalsh(fi)
except:
print "eigs did not converge"
return -numpy.Inf
if numpy.any(eigs <= 0.):
return -numpy.Inf
lnp -= numpy.log(numpy.max(eigs) / numpy.min(eigs)) # condition number!
return lnp
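# A minimal sketch (assumed parameter values, for illustration only): each
# Gaussian takes 10 numbers -- alpha, mu (3), then the 6 entries of the factor
# used by _construct_covariance_from_vector. The vector below builds one
# unit-mass Gaussian at the origin with unit covariance:
#     gal = galaxy_model_3d()
#     gal.set_parameters_from_vector(numpy.array([1., 0., 0., 0.,
#                                                 1., 1., 1., 0., 0., 0.]))
#     gal.get_total_mass()   # -> 1.0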
def choose_random_projection():
"""
Generate two orthogonal normal vectors, drawn isotropically from the sphere.
"""
xhat = numpy.random.normal(size=3)
xhat /= numpy.sqrt(numpy.dot(xhat, xhat))
yhat = numpy.random.normal(size=3)
yhat -= numpy.dot(xhat, yhat) * xhat
yhat /= numpy.sqrt(numpy.dot(yhat, yhat))
return xhat, yhat
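# Quick sanity check (illustrative, not part of the original module): the two
# returned vectors are unit length and mutually orthogonal:
#     xhat, yhat = choose_random_projection()
#     numpy.dot(xhat, xhat)   # ~ 1.0
#     numpy.dot(xhat, yhat)   # ~ 0.0 (to machine precision)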
class image_and_model(object):
"""
    This class represents a 2D image of a galaxy and holds all the parameters that map the 3D model onto this image
"""
def __init__(self):
self.data = None
self.synthetic = 0.
self.ivar = None
self.shape = None
self.psf = None
self.parameters = {'alpha' : None,
'beta' : None,
'gamma' : None,
'intensity' : None,
'scale' : None,
'xshift' : None,
'yshift' : None,
'bg' : None}
def set_data(self, data):
if self.shape is None:
self.shape = data.shape
else:
assert data.shape == self.shape
self.data = data
def set_ivar(self, ivar):
"""
Set the estimated inverse variance map for the image.
"""
if self.shape is None:
self.shape = ivar.shape
else:
assert ivar.shape == self.shape
self.ivar = ivar
def set_shape(self, shape):
assert len(shape) == 2
self.shape = shape
self.synthetic = 0.
return None
def set_psf(self, psf):
assert type(psf) == mixture_of_gaussians
assert psf.D == 2
self.psf = psf
self.synthetic = 0.
def set_parameters(self, **kwargs):
if kwargs is not None:
for key, value in kwargs.iteritems():
assert key in self.parameters.keys()
self.parameters[key] = value
self.synthetic = 0.
def set_parameters_from_vector(self, par_vector):
self.parameters['alpha'] = par_vector[0]
self.parameters['beta'] = par_vector[1]
self.parameters['gamma'] = par_vector[2]
self.parameters['intensity'] = par_vector[3]
self.parameters['scale'] = par_vector[4]
self.parameters['xshift'] = par_vector[5]
self.parameters['yshift'] = par_vector[6]
self.parameters['bg'] = par_vector[7]
self.synthetic = 0.
def set_galaxy(self, galaxy):
assert type(galaxy) == galaxy_model_3d
self.galaxy = galaxy
self.synthetic = 0.
def get_data(self):
return self.data
def get_ivar(self):
return self.ivar
def get_synthetic(self):
if numpy.isscalar(self.synthetic):
if self.synthetic != 0.0:
self.synthetic = 0
self.construct_synthetic()
return self.synthetic
def get_shape(self):
return self.shape
def get_parameters(self):
return self.parameters
def get_parameters_vector(self):
return numpy.array((self.parameters['alpha'],
self.parameters['beta'],
self.parameters['gamma'],
self.parameters['intensity'],
self.parameters['scale'],
self.parameters['xshift'],
self.parameters['yshift'],
self.parameters['bg']))
def get_parameter(self, key):
assert key in self.parameters.keys()
return self.parameters[key]
def _add_to_synthetic(self, contribution):
self.synthetic += contribution
def construct_synthetic(self, xi_hat=None, eta_hat=None):
nx, ny = self.shape
xs = (numpy.arange(nx) - self.parameters['xshift']) * self.parameters['scale'] # kpc
ys = (numpy.arange(ny) - self.parameters['yshift']) * self.parameters['scale'] # kpc
        if xi_hat is None and eta_hat is None:
r = rotation_3d()
r_mat = r.return_rotation_matrix(self.parameters['alpha'], self.parameters['beta'], self.parameters['gamma'])
xi_hat = r_mat[0]
eta_hat = r_mat[1]
self._add_to_synthetic(self.parameters['bg'])
self._add_to_synthetic(self.galaxy.render_2d_image(xi_hat, eta_hat, xs, ys,
intensity=self.parameters['intensity'],
psf=self.psf.rescale(self.parameters['scale'])))
def get_chi_squared(self):
return numpy.sum(self.ivar * (self.data - self.get_synthetic()) ** 2)
def get_chi_vector(self):
return (numpy.sqrt(self.ivar) * (self.data - self.get_synthetic())).flatten()
def get_ln_likelihood(self):
return -0.5 * self.get_chi_squared()
def get_ln_prior(self):
return 0. # no beliefs
def __call__(self, parameter_vector):
self.set_parameters_from_vector(parameter_vector)
return self.get_chi_squared()
class album_and_model(object):
"""
    This class represents a set of images that come from the same 3D model, each with different parameters.
"""
def __init__(self):
self.images = []
self.galaxy = None
def __len__(self):
return len(self.images)
def __getitem__(self, i):
return self.images[i]
def __iter__(self):
for image in self.images:
yield image
def add_image(self, image):
assert type(image) == image_and_model
self.images.append(image)
def get_all_images(self):
return self.images
def set_galaxy(self, galaxy):
self.galaxy = galaxy
for image in self.images:
image.set_galaxy(galaxy)
def get_chi_squared(self):
chisquared = 0.
for image in self.images:
            chisquared += image.get_chi_squared()
return chisquared
def get_ln_likelihood(self):
lnlike = 0.
for image in self.images:
lnlike += image.get_ln_likelihood()
return lnlike
def get_ln_prior(self):
lnp = self.galaxy.get_ln_prior()
for image in self.images:
lnp += image.get_ln_prior()
return lnp
def get_ln_posterior(self):
lnp = self.get_ln_prior()
if numpy.isfinite(lnp):
lnp += self.get_ln_likelihood()
return lnp
def __call__(self, galparvec):
"""
Return -2 * ln_prob, which is something we can *minimize*.
"""
galaxy = galaxy_model_3d()
galaxy.set_parameters_from_vector(galparvec)
self.set_galaxy(galaxy) # must use `set_galaxy()` to propagate to images
return -2. * self.get_ln_posterior()
class illustris_model_and_image(object):
def __init__(self, file_path):
assert h5py.is_hdf5(file_path)
self.file_path = file_path
f = h5py.File(file_path, "r")
stars_snap = f['PartType4']
stars_coords = stars_snap['Coordinates']
stars_mags = stars_snap['GFM_StellarPhotometrics']
self.stars_coords = (stars_coords - numpy.mean(stars_coords, axis=0)) / numpy.std(stars_coords, axis=0)
self.stars_mags = {'U': stars_mags[:,0],
'B': stars_mags[:,1],
'V': stars_mags[:,2],
'K': stars_mags[:,3],
'g': stars_mags[:,4],
'r': stars_mags[:,5],
'i': stars_mags[:,6],
'z': stars_mags[:,7]}
self.image = 0.
self.image_parameters = {'alpha' : None,
'beta' : None,
'gamma' : None,
'intensity' : None,
'scale' : None,
'xshift' : None,
'yshift' : None,
'bg' : None,
'psf_size': None}
def set_image_shape(self, shape):
assert len(shape) == 2
self.shape = shape
def set_image_parameters(self, **kwargs):
if kwargs is not None:
for key, value in kwargs.iteritems():
assert key in self.image_parameters.keys()
self.image_parameters[key] = value
self.image = 0.
def get_image(self):
return self.image
def get_shape(self):
return self.shape
def _add_to_image(self, contribution):
self.image += contribution
def render_2d_image(self, xi_hat, eta_hat, xs, ys, band_mag='g'):
projection_matrix = numpy.vstack((xi_hat, eta_hat))
stars_coords_2d = numpy.dot(self.stars_coords, projection_matrix.T)
H, xedges, yedges = numpy.histogram2d(stars_coords_2d[:,0],
stars_coords_2d[:,1],
[xs, ys],
normed=True,
weights= 10 ** (self.stars_mags[band_mag]/-2.5))
return H
def construct_image(self, xi_hat=None, eta_hat=None):
nx, ny = self.shape
xs = (numpy.arange(nx + 1) - self.image_parameters['xshift']) * self.image_parameters['scale'] # kpc
ys = (numpy.arange(ny + 1) - self.image_parameters['yshift']) * self.image_parameters['scale'] # kpc
        if xi_hat is None and eta_hat is None:
r = rotation_3d()
r_mat = r.return_rotation_matrix(self.image_parameters['alpha'], self.image_parameters['beta'], self.image_parameters['gamma'])
xi_hat = r_mat[0]
eta_hat = r_mat[1]
H = self.render_2d_image(xi_hat, eta_hat, xs, ys)
        if self.image_parameters['psf_size'] is not None:
H = gaussian_filter(H, self.image_parameters['psf_size'])
        if self.image_parameters['bg'] is not None:
self._add_to_image(self.image_parameters['bg'])
self._add_to_image(H * self.image_parameters['intensity']) | mit |
SKIRT/PTS | do/eagle/plotresources.py | 1 | 4335 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.do.eagle.plotresources Plot a histogram of the resources used for a selection of SKIRT-run simulations.
#
# This script plots two histograms of the resources used for the SKIRT simulations corresponding to
# a set of SKIRT-run records:
# - The computation time, determined as the product of the wall-time and the number of parallel processes.
# - The peak memory usage of the root process
#
# The script expects a single command-line argument specifying the label for the records to be considered.
#
# -----------------------------------------------------------------
# Import standard modules
import os.path
import sys
import numpy as np
import matplotlib.pyplot as plt
# Import the relevant PTS classes and modules
from pts.core.basics.log import log
from pts.eagle import config
from pts.eagle import database
from pts.eagle.skirtrun import SkirtRun
# -----------------------------------------------------------------
def advance(db, label, stagefrom, stageto):
with db.transaction():
# get the eligible records
records = db.select("label=? and stage=? and status='succeeded'", (label, stagefrom))
if len(records) > 0:
# show some info and ask for confirmation
print "There are {} successful SKIRT-runs with label {} at stage {}.".format(len(records), label, stagefrom)
confirm = raw_input("--> Would you like to advance these to stage {}? [y/n]: ".format(stageto))
# advance stage if user confirmed
if confirm.lower().startswith('y'):
db.updatestage(records, stageto)
print "Advancing {} SKIRT-runs from stage {} to {}".format(len(records), stagefrom, stageto)
else:
print "Update was rejected"
# -----------------------------------------------------------------
# get the command-line arguments
if len(sys.argv)!=2: raise ValueError("This script expects a single command-line argument: label")
label = sys.argv[1]
# get the eligible records
db = database.Database()
records = db.select("((stage='simulate' and status='succeeded') or (stage in ('observe', 'store', 'completed')))" \
" and label=?", (label,))
db.close()
size = len(records)
if size==0: raise ValueError("There are no simulated records with label " + label)
# assemble the statistics for all records
log.info("Assembling statistics for {} SKIRT simulations...".format(size))
time = np.zeros(size)
memory = np.zeros(size)
for index in range(size):
logfilepath = SkirtRun(records[index]["runid"]).simulation().logfilepath()
for line in open(logfilepath):
if " Finished simulation " in line:
segments = line.split()
processes = float(segments[segments.index("processes")-1])
timeindex = segments.index("s") if "s" in segments else segments.index("s.")
walltime = float(segments[timeindex-1])
time[index] = processes * walltime
if " Available memory: " in line:
segments = line.split()
memory[index] = float(segments[segments.index("usage:")+1])
# construct the time histogram
log.info("Constructing plots...")
figure = plt.figure(figsize=(8,8))
plt.xlabel('Computation time (min)', fontsize='medium')
plt.ylabel('Simulation count', fontsize='medium')
plt.hist(time/60, bins=25)
plt.vlines(np.mean(time)/60, 0, 5, colors='r')
plotfilepath = os.path.join(config.plots_path, label + "_time_hist.pdf")
plt.savefig(plotfilepath)
log.info("Created time histogram " + plotfilepath)
# construct the memory histogram
figure = plt.figure(figsize=(8,8))
plt.xlabel('Peak memory usage (GB)', fontsize='medium')
plt.ylabel('Simulation count', fontsize='medium')
plt.hist(memory, bins=25)
plt.vlines(np.max(memory), 0, 5, colors='r')
plotfilepath = os.path.join(config.plots_path, label + "_memory_hist.pdf")
plt.savefig(plotfilepath)
log.info("Created memory histogram " + plotfilepath)
# -----------------------------------------------------------------
| agpl-3.0 |
ChanChiChoi/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 182 | 1743 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.lda import LDA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LDA(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained for each component
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('PCA of IRIS dataset')
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
kain88-de/mdanalysis | package/MDAnalysis/analysis/polymer.py | 1 | 5509 | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
Polymer analysis --- :mod:`MDAnalysis.analysis.polymer`
=======================================================
:Author: Richard J. Gowers
:Year: 2015
:Copyright: GNU Public License v3
This module contains various commonly used tools in analysing polymers.
"""
from __future__ import division, absolute_import
from six.moves import range
import numpy as np
import logging
from .. import NoDataError
from ..lib.distances import calc_bonds
from .base import AnalysisBase
logger = logging.getLogger(__name__)
class PersistenceLength(AnalysisBase):
r"""Calculate the persistence length for polymer chains
The persistence length is the length at which two points on the polymer
chain become decorrelated.
Notes
-----
This analysis requires that the trajectory supports indexing
.. versionadded:: 0.13.0
"""
def __init__(self, atomgroups, **kwargs):
"""Calculate the persistence length for polymer chains
Parameters
----------
atomgroups : list
List of atomgroups. Each atomgroup should represent a single
polymer chain, ordered in the correct order.
start : int, optional
First frame of trajectory to analyse, Default: None becomes 0.
stop : int, optional
Last frame of trajectory to analyse, Default: None becomes
n_frames.
step : int, optional
            Step between frames to analyse. Default: None becomes 1.
"""
super(PersistenceLength, self).__init__(
atomgroups[0].universe.trajectory, **kwargs)
self._atomgroups = atomgroups
# Check that all chains are the same length
lens = [len(ag) for ag in atomgroups]
chainlength = len(atomgroups[0])
if not all(l == chainlength for l in lens):
raise ValueError("Not all AtomGroups were the same size")
self._results = np.zeros(chainlength - 1, dtype=np.float32)
def _single_frame(self):
# could optimise this by writing a "self dot array"
# we're only using the upper triangle of np.inner
# function would accept a bunch of coordinates and spit out the
# decorrel for that
n = len(self._atomgroups[0])
for chain in self._atomgroups:
# Vector from each atom to next
vecs = chain.positions[1:] - chain.positions[:-1]
# Normalised to unit vectors
vecs /= np.sqrt((vecs * vecs).sum(axis=1))[:, None]
inner_pr = np.inner(vecs, vecs)
for i in range(n-1):
self._results[:(n-1)-i] += inner_pr[i, i:]
def _conclude(self):
n = len(self._atomgroups[0])
norm = np.linspace(n - 1, 1, n - 1)
norm *= len(self._atomgroups) * self.n_frames
self.results = self._results / norm
self._calc_bond_length()
def _calc_bond_length(self):
"""calculate average bond length"""
bs = []
for ag in self._atomgroups:
pos = ag.positions
b = calc_bonds(pos[:-1], pos[1:]).mean()
bs.append(b)
self.lb = np.mean(bs)
def perform_fit(self):
"""Fit the results to an exponential decay"""
try:
self.results
except AttributeError:
raise NoDataError("Use the run method first")
self.x = np.arange(len(self.results)) * self.lb
self.lp = fit_exponential_decay(self.x, self.results)
self.fit = np.exp(-self.x/self.lp)
def plot(self, ax=None):
"""Oooh fancy"""
import matplotlib.pyplot as plt
if ax is None:
ax = plt.gca()
ax.plot(self.x, self.results, 'ro', label='Result')
ax.plot(self.x, self.fit, label='Fit')
ax.set(xlabel='x', ylabel='C(x)', xlim=[0.0, 40 * self.lb])
ax.legend(loc='best')
return ax
def fit_exponential_decay(x, y):
r"""Fit a function to an exponential decay
.. math:: y = \exp(-x/a)
Parameters
----------
x, y : array_like
The two arrays of data
Returns
-------
a : float
The coefficient *a* for this decay
Notes
-----
This function assumes that data starts at 1.0 and decays to 0.0
Requires scipy
"""
from scipy.optimize import curve_fit
def expfunc(x, a):
return np.exp(-x/a)
a = curve_fit(expfunc, x, y)[0][0]
return a
| gpl-2.0 |
sprax/python | nlp/plot_classifier_chain_yeast.py | 32 | 4547 | """
============================
Classifier Chain
============================
Example of using classifier chain on a multilabel dataset.
For this example we will use the `yeast
<http://mldata.org/repository/data/viewslug/yeast>`_ dataset which contains
2417 datapoints each with 103 features and 14 possible labels. Each
data point has at least one label. As a baseline we first train a logistic
regression classifier for each of the 14 labels. To evaluate the performance of
these classifiers we predict on a held-out test set and calculate the
:ref:`jaccard similarity score <jaccard_similarity_score>`.
Next we create 10 classifier chains. Each classifier chain contains a
logistic regression model for each of the 14 labels. The models in each
chain are ordered randomly. In addition to the 103 features in the dataset,
each model gets the predictions of the preceding models in the chain as
features (note that by default at training time each model gets the true
labels as features). These additional features allow each chain to exploit
correlations among the classes. The Jaccard similarity score for each chain
tends to be greater than that of the set independent logistic models.
Because the models in each chain are arranged randomly there is significant
variation in performance among the chains. Presumably there is an optimal
ordering of the classes in a chain that will yield the best performance.
However we do not know that ordering a priori. Instead we can construct a
voting ensemble of classifier chains by averaging the binary predictions of
the chains and applying a threshold of 0.5. The Jaccard similarity score of the
ensemble is greater than that of the independent models and tends to exceed
the score of each chain in the ensemble (although this is not guaranteed
with randomly ordered chains).
"""
print(__doc__)
# Author: Adam Kleczewski
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.multioutput import ClassifierChain
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import jaccard_similarity_score
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import fetch_mldata
# Load a multi-label dataset
yeast = fetch_mldata('yeast')
X = yeast['data']
Y = yeast['target'].transpose().toarray()
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.2,
random_state=0)
# Fit an independent logistic regression model for each class using the
# OneVsRestClassifier wrapper.
ovr = OneVsRestClassifier(LogisticRegression())
ovr.fit(X_train, Y_train)
Y_pred_ovr = ovr.predict(X_test)
ovr_jaccard_score = jaccard_similarity_score(Y_test, Y_pred_ovr)
# Fit an ensemble of logistic regression classifier chains and take the
# average prediction of all the chains.
chains = [ClassifierChain(LogisticRegression(), order='random', random_state=i)
for i in range(10)]
for chain in chains:
chain.fit(X_train, Y_train)
Y_pred_chains = np.array([chain.predict(X_test) for chain in
chains])
chain_jaccard_scores = [jaccard_similarity_score(Y_test, Y_pred_chain >= .5)
for Y_pred_chain in Y_pred_chains]
Y_pred_ensemble = Y_pred_chains.mean(axis=0)
ensemble_jaccard_score = jaccard_similarity_score(Y_test,
Y_pred_ensemble >= .5)
model_scores = [ovr_jaccard_score] + chain_jaccard_scores
model_scores.append(ensemble_jaccard_score)
model_names = ('Independent',
'Chain 1',
'Chain 2',
'Chain 3',
'Chain 4',
'Chain 5',
'Chain 6',
'Chain 7',
'Chain 8',
'Chain 9',
'Chain 10',
'Ensemble')
x_pos = np.arange(len(model_names))
# Plot the Jaccard similarity scores for the independent model, each of the
# chains, and the ensemble (note that the vertical axis on this plot does
# not begin at 0).
fig, ax = plt.subplots(figsize=(7, 4))
ax.grid(True)
ax.set_title('Classifier Chain Ensemble Performance Comparison')
ax.set_xticks(x_pos)
ax.set_xticklabels(model_names, rotation='vertical')
ax.set_ylabel('Jaccard Similarity Score')
ax.set_ylim([min(model_scores) * .9, max(model_scores) * 1.1])
colors = ['r'] + ['b'] * len(chain_jaccard_scores) + ['g']
ax.bar(x_pos, model_scores, alpha=0.5, color=colors)
plt.tight_layout()
plt.show()
| lgpl-3.0 |
flowmatters/veneer-py | veneer/general.py | 1 | 36068 | try:
from urllib2 import quote
import httplib as hc
except:
from urllib.request import quote
import http.client as hc
import json
import re
from .server_side import VeneerIronPython
from .utils import SearchableList, _stringToList, read_veneer_csv, objdict#, deprecate_async
import pandas as pd
# Source
from . import extensions
from .ts_naming_functions import *
PRINT_URLS = False
PRINT_ALL = False
PRINT_SCRIPTS = False
MODEL_TABLES = ['fus']
def log(text):
import sys
print('\n'.join(_stringToList(text)))
sys.stdout.flush()
def _veneer_url_safe_id_string(s):
return s.replace('#', '').replace('/', '%2F').replace(':', '')
#@deprecate_async
class Veneer(object):
'''
Acts as a high level client to the Veneer web service within eWater Source.
'''
def __init__(self, port=9876, host='localhost', protocol='http', prefix='', live=True):
'''
Instantiate a new Veneer client.
Parameters:
port, host, protocol: Connection information for running Veneer service (default 9876, localhost, http)
prefix: path prefix for all queries. Useful if Veneer is running behind some kind of proxy
live: Connecting to a live Veneer service or a statically served copy of the results? Default: True
'''
self.port = port
self.host = host
self.protocol = protocol
self.prefix = prefix
self.base_url = "%s://%s:%d%s" % (protocol, host, port, prefix)
self.live_source = live
if self.live_source:
self.data_ext = ''
self.img_ext = ''
else:
if protocol and protocol.startswith('file'):
self.base_url = '%s://%s'%(protocol,prefix)
self.data_ext='.json'
self.img_ext='.png'
self.model = VeneerIronPython(self)
def shutdown(self):
'''
Stop the Veneer server (and shutdown the command line if applicable)
'''
try:
self.post_json('/shutdown')
except ConnectionResetError:
return
raise Exception(
"Connection didn't reset. Shutdown may not have worked")
def _replace_inf(self, text):
return re.sub('":(-?)INF', '":\\1Infinity', text)
def url(self,url):
if self.protocol=='file':
return self.prefix + url
return '%s://%s:%d%s/%s'%(self.protocol,self.host,self.port,self.prefix,url)
def retrieve_json(self, url):
'''
Retrieve data from the Veneer service at the given url path.
url: Path to required resource, relative to the root of the Veneer service.
'''
query_url = self.prefix + url + self.data_ext
if PRINT_URLS:
print("*** %s - %s ***" % (url, query_url))
if self.protocol == 'file':
text = open(query_url).read()
else:
conn = hc.HTTPConnection(self.host, port=self.port)
conn.request('GET', quote(query_url))
resp = conn.getresponse()
text = resp.read().decode('utf-8')
text = self._replace_inf(text)
if PRINT_ALL:
print(json.loads(text))
print("")
try:
return json.loads(text)
except Exception as e:
raise Exception(
'Error parsing response as JSON. Retrieving %s and received:\n%s' % (url, text[:100]))
def retrieve_csv(self, url):
'''
Retrieve data from the Veneer service, at the given url path, in CSV format.
url: Path to required resource, relative to the root of the Veneer service.
NOTE: CSV responses are currently only available for time series results
'''
query_url = self.prefix + url
if PRINT_URLS:
print("*** %s - %s ***" % (url, query_url))
if self.protocol == 'file':
query_url += '.csv'
print(query_url)
text = open(query_url)
else:
conn = hc.HTTPConnection(self.host, port=self.port)
conn.request('GET', quote(url + self.data_ext),
headers={"Accept": "text/csv"})
resp = conn.getresponse()
text = resp.read().decode('utf-8')
result = read_veneer_csv(text)
if PRINT_ALL:
print(result)
print("")
return result
# @deprecate_async
def update_json(self, url, data, run_async=False):
'''
Issue a PUT request to the Veneer service to update the data held at url
url: Path to required resource, relative to the root of the Veneer service.
data: Data to update.
NOTE: This method will typically be used internally, by other Veneer methods.
Usually, you will want to call one of these other methods to update something specific.
For example, configure_recording to enable and disable time series recorders in the model.
'''
return self.send_json(url, data, 'PUT', run_async)
# @deprecate_async
def send_json(self, url, data, method, run_async=False):
payload = json.dumps(data)
headers = {'Content-type': 'application/json',
'Accept': 'application/json'}
return self.send(url, method, payload, headers, run_async)
# @deprecate_async
def post_json(self, url, data=None, run_async=False):
return self.send_json(url, data, 'POST', run_async)
# @deprecate_async
def send(self, url, method, payload=None, headers={}, run_async=False):
conn = hc.HTTPConnection(self.host, port=self.port)
conn.request(method, url, payload, headers=headers)
if run_async:
return conn
resp = conn.getresponse()
code = resp.getcode()
if code == 302:
return code, resp.getheader('Location')
elif code == 200:
resp_body = resp.read().decode('utf-8')
return code, (json.loads(resp_body) if len(resp_body) else None)
else:
return code, resp.read().decode('utf-8')
return conn
def status(self):
return self.retrieve_json('/')
# @deprecate_async
def run_server_side_script(self, script, run_async=False):
'''
Run an IronPython script within Source.
Requires Veneer to be running with 'Allow Scripts' option.
script: the full text of an IronPython script to execute from within Source.
NOTE: In many cases, it is possible (and desirable) to call helper methods within Veneer.model,
rather than write your own IronPython script.
'''
if PRINT_SCRIPTS:
print(script)
result = self.post_json('/ironpython', {'Script': script},
run_async=run_async)
if run_async:
return result
code, data = result
if code == 403:
raise Exception('Script disabled. Enable scripting in Veneer')
return data
def source_version(self):
'''
Returns the version of Source we are connected to, if available.
Returns list of four integers [major,minor,build,revision] or
Returns [0,0,0,0] if unknown.
'''
info = self.retrieve_json('/')
if hasattr(info,'keys'):
return [int(i) for i in info['SourceVersion'].split('.')]
return [0,0,0,0]
def scenario_info(self):
return self.retrieve_json('/')
def configure_recording(self, enable=[], disable=[],run_async=False):
'''
Enabled and disable time series recording in the Source model.
enable: List of time series selectors to enable,
disable: List of time series selectors to disable
Note: Each time series selector is a python dictionary object with up to four keys:
* NetworkElement
* RecordingElement
* RecordingVariable
* FunctionalUnit
These are used to match time series available from the Source model. A given selector may match
multiple time series. For example, a selector of {'RecordingVariable':'Downstream Flow Volume'}
will match Downstream Flow Volume from all nodes and links.
Any empty dictionary {} will match ALL time series in the model.
So, for example, you could disable ALL recording in the model with
v = Veneer()
v.configure_recording(disable=[{}])
Note, the time series selectors in enable and disable may both match the same time series in some cases.
In this case, the 'enable' will take effect.
'''
def get_many(src, keys, default):
return [src.get(k, default) for k in keys]
def translate(rule):
keys = ['NetworkElement', 'RecordingElement', 'RecordingVariable']
vals = get_many(rule, keys, '')
if vals[2] == '':
vals[2] = vals[1]
if 'FunctionalUnit' in rule:
vals[0] += '@@' + rule['FunctionalUnit']
all_known_keys = set(['FunctionalUnit'] + keys)
invalid_keys = set(rule.keys()) - (all_known_keys)
if len(invalid_keys):
raise Exception("Unknown recording keys: %s" %
(str(invalid_keys)))
return 'location/%s/element/%s/variable/%s' % tuple(vals)
modifier = {'RecordNone': [translate(r) for r in disable],
'RecordAll': [translate(r) for r in enable]}
return self.update_json('/recorders', modifier,run_async=run_async)
#@deprecate_async
def run_model(self, params=None, start=None, end=None, run_async=False, name=None, **kwargs):
'''
Trigger a run of the Source model
params: Python dictionary of parameters to pass to Source. Should match the parameters expected
of the running configuration. (If you just want to set the start and end date of the simulation,
use the start and end parameters
start, end: The start and end date of the simulation. Should be provided as Date objects or as text in the dd/mm/yyyy format
run_async: (default False). If True, the method will return immediately rather than waiting for the simulation to finish.
Useful for triggering parallel runs. Method will return a connection object that can then be queried to know
when the run has finished.
name: Name to assign to run in Source results (default None: let Source name using default strategy)
kwargs: optional named parameters to be used to update the params dictionary
In the default behaviour (run_async=False), this method will return once the Source simulation has finished, and will return
the URL of the results set in the Veneer service
'''
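        # Hypothetical usage sketch (dates and run name are illustrative only):
        #   v = Veneer()
        #   code, results_url = v.run_model(start='01/01/1990', end='31/12/2000',
        #                                   name='baseline')
        #   run_summary = v.retrieve_run()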
conn = hc.HTTPConnection(self.host, port=self.port)
if params is None:
params = {}
params.update(kwargs)
if not start is None:
params['StartDate'] = to_source_date(start)
if not end is None:
params['EndDate'] = to_source_date(end)
if not name is None:
params['_RunName'] = name
# conn.request('POST','/runs',json.dumps({'parameters':params}),headers={'Content-type':'application/json','Accept':'application/json'})
conn.request('POST', '/runs', json.dumps(params),
headers={'Content-type': 'application/json', 'Accept': 'application/json'})
if run_async:
return conn
return self._wait_for_run(conn)
def _wait_for_run(self,conn):
resp = conn.getresponse()
code = resp.getcode()
if code == 302:
return code, resp.getheader('Location')
elif code == 200:
return code, None
elif code == 500:
error = json.loads(resp.read().decode('utf-8'))
raise Exception('\n'.join([error['Message'], error['StackTrace']]))
else:
return code, resp.read().decode('utf-8')
def drop_run(self, run='latest'):
'''
Tell Source to drop/delete a specific set of results from memory.
run: Run number to delete. Default ='latest'. Valid values are 'latest' and integers from 1
'''
assert self.live_source
conn = hc.HTTPConnection(self.host, port=self.port)
conn.request('DELETE', '/runs/%s' % str(run))
resp = conn.getresponse()
code = resp.getcode()
return code
def drop_all_runs(self):
'''
Tell Source to drop/delete ALL current run results from memory
'''
runs = self.retrieve_runs()
while len(runs) > 0:
self.drop_run(int(runs[-1]['RunUrl'].split('/')[-1]))
runs = self.retrieve_runs()
def retrieve_runs(self):
'''
Retrieve the list of available runs.
Individual runs can be used with retrieve_run to retrieve a summary of results
'''
return self.retrieve_json('/runs')
def retrieve_run(self, run='latest'):
'''
Retrieve a results summary for a particular run.
This will include references to all of the time series results available for the run.
run: Run to retrieve. Either 'latest' (default) or an integer run number from 1
'''
run = run.split('/')[-1]
if run == 'latest' and not self.live_source:
all_runs = self.retrieve_json('/runs')
result = self.retrieve_json(all_runs[-1]['RunUrl'])
else:
result = self.retrieve_json('/runs/%s' % str(run))
result['Results'] = SearchableList(result['Results'])
return result
def network(self):
'''
Retrieve the network from Veneer.
The result will be a Python dictionary in GeoJSON conventions.
The 'features' key of the returned dictionary will be a SearchableList, suitable for querying for
different properties - eg to filter out just nodes, or links, or catchments.
Example: Find all the node names in the current Source model
v = Veneer()
network = v.network()
nodes = network['features'].find_by_feature_type('node')
node_names = nodes._unique_values('name')
'''
res = self.retrieve_json('/network')
res['_v']=self
return _extend_network(res)
def model_table(self,table='fus'):
df = pd.read_csv(self.retrieve_csv('/tables/%s'%table))
df = df.set_index('Catchment')
return df
def functions(self):
'''
Return a SearchableList of the functions in the Source model.
'''
return SearchableList(self.retrieve_json('/functions'))
def update_function(self, fn, value):
'''
Update a function within Source
fn: str, name of function to update.
'''
fn = fn.split('/')[-1]
url = '/functions/' + fn.replace('$', '')
payload = {
'Name': fn,
'Expression': str(value)
}
return self.update_json(url, payload)
def variables(self):
'''
Return a SearchableList of the function variables in the Source model
'''
return SearchableList(self.retrieve_json('/variables'))
def variable(self, name):
'''
Returns details of a particular variable
'''
name = name.replace('$', '')
return self.retrieve_json('/variables/%s' % name)
def variable_time_series(self, name):
'''
Returns time series for a particular variable
'''
name = name.replace('$', '')
url = '/variables/%s/TimeSeries' % name
result = self.retrieve_json(url)
df = pd.DataFrame(self.convert_dates(result['Events'])).set_index(
'Date').rename({'Value': result['Name']})
extensions._apply_time_series_helpers(df)
return df
def update_variable_time_series(self, name, timeseries):
name = name.replace('$', '')
url = '/variables/%s/TimeSeries' % name
if hasattr(timeseries, 'columns'):
date_format = '%m/%d/%Y'
payload = {}
events = zip(timeseries.index, timeseries[timeseries.columns[0]])
payload['Events'] = [
{'Date': d.strftime(date_format), 'Value': v} for d, v in events]
payload['StartDate'] = timeseries.index[0].strftime(date_format)
payload['EndDate'] = timeseries.index[-1].strftime(date_format)
timeseries = payload
return self.update_json(url, timeseries)
def variable_piecewise(self, name):
'''
Returns piecewise linear function for a particular variable
'''
name = name.replace('$', '')
url = '/variables/%s/Piecewise' % name
result = self.retrieve_json(url)
return pd.DataFrame(result['Entries'], columns=[result[c] for c in ['XName', 'YName']])
def update_variable_piecewise(self, name, values):
'''
Update piecewise linear function for a given variable.
name: str, variable name to update.
'''
name = name.replace('$', '')
url = '/variables/%s/Piecewise' % name
if hasattr(values, 'columns'):
payload = {}
entries = list(
zip(values[values.columns[0]], values[values.columns[1]]))
payload['Entries'] = [[float(x), float(y)] for (x, y) in entries]
payload['XName'] = values.columns[0]
payload['YName'] = values.columns[1]
values = payload
print(values)
return self.update_json(url, values)
def data_sources(self):
'''
Return a SearchableList of the data sources in the Source model
Note: Returns a summary (min,max,mean,etc) of individual time series - NOT the full record.
You can get the time series by retrieving individual data sources (`data_source` method)
'''
return SearchableList(self.retrieve_json('/dataSources'))
def data_source(self, name):
'''
Return an individual data source, by name.
Note: Will include the each time series associated with the data source IN FULL
'''
prefix = '/dataSources/'
if not name.startswith(prefix):
name = prefix + name
result = self.retrieve_json(name)
def _transform_details(details):
if 'Events' in details[0]['TimeSeries']:
data_dict = {d['Name']: d['TimeSeries']['Events']
for d in details}
df = self._create_timeseries_dataframe(data_dict, common_index=False)
for d in details:
df[d['Name']].units = d['TimeSeries']['Units']
return df
# Slim Time Series...
ts = details[0]['TimeSeries']
start_t = self.parse_veneer_date(ts['StartDate'])
end_t = self.parse_veneer_date(ts['EndDate'])
freq = ts['TimeStep'][0]
index = pd.date_range(start_t, end_t, freq=freq)
data_dict = {d['Name']: d['TimeSeries']['Values'] for d in details}
df = pd.DataFrame(data_dict, index=index)
for d in details:
df[d['Name']].units = d['TimeSeries']['Units']
extensions._apply_time_series_helpers(df)
return df
def _transform_data_source_item(item):
item['Details'] = _transform_details(item['Details'])
return item
result['Items'] = SearchableList(
[_transform_data_source_item(i) for i in result['Items']])
return result
def create_data_source(self, name, data=None, units='mm/day', precision=3, reload_on_run=False):
'''
Create a new data source (name) using a Pandas dataframe (data)
If no dataframe is provided, name is interpreted as a filename
'''
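        # Hypothetical usage sketch (the DataFrame and data source name are
        # illustrative only):
        #   df = pd.DataFrame({'rainfall': [1.0, 0.0, 3.2]},
        #                     index=pd.date_range('2000-01-01', periods=3))
        #   v.create_data_source('my rainfall', df, units='mm/day')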
dummy_data_group = {}
dummy_data_group['Name'] = name
dummy_item = {}
dummy_item['Name'] = 'Item for %s' % name
dummy_item['InputSets'] = ['Default Input Set']
dummy_detail = {}
dummy_detail['Name'] = 'Details for %s' % name
dummy_detail['TimeSeries'] = {}
#dummy_item['Details'] = [dummy_detail]
if data is not None:
dummy_item['DetailsAsCSV'] = data.to_csv(
float_format='%%.%df' % precision)
dummy_item['ReloadOnRun'] = reload_on_run
dummy_item['UnitsForNewTS'] = units
dummy_data_group['Items'] = [dummy_item]
return self.post_json('/dataSources', data=dummy_data_group)
def delete_data_source(self, group):
'''
        Tell Source to delete a data source group from the project.
        group: Name of the data source group to delete.
'''
assert self.live_source
conn = hc.HTTPConnection(self.host, port=self.port)
conn.request('DELETE', '/dataSources/%s' % str(quote(group)))
resp = conn.getresponse()
code = resp.getcode()
return code
def data_source_item(self, source, name=None, input_set='__all__'):
if name:
source = '/'.join([source, input_set,
_veneer_url_safe_id_string(name)])
else:
name = source
prefix = '/dataSources/'
if not source.startswith(prefix):
source = prefix + source
result = self.retrieve_json(source)
def _transform(res):
if 'TimeSeries' in res:
df = self._create_timeseries_dataframe({name: res['TimeSeries']['Events']}, common_index=False)
df[df.columns[0]].units = res['TimeSeries']['Units']
return df
elif 'Items' in res:
data_dict = {}
suffix = ''
units = {}
for item in res['Items']:
if len(res['Items']) > 1:
suffix = " (%s)" % item['Name']
if 'Details' in item:
keys = ["%s%s" % (d['Name'], suffix) for d in item['Details']]
update = {
(key): d['TimeSeries']['Events'] for key,d in zip(keys,item['Details'])}
                        for k,d in zip(keys,item['Details']):
units[k] = d['TimeSeries']['Units']
data_dict.update(update)
df = self._create_timeseries_dataframe(data_dict, common_index=False)
                for k, v in units.items():
df[k].units = v
return df
return res
if isinstance(result, list):
if len(result) == 1:
result = result[0]
else:
return [_transform(r) for r in result]
return _transform(result)
def result_matches_criteria(self, result, criteria):
import re
# MATCH_ALL='__all__'
for key, pattern in criteria.items():
# if pattern==MATCH_ALL: continue
if not re.match(pattern, result[key]):
return False
return True
def input_sets(self):
'''
Return a SearchableList of the input sets in the Source model
Each input set will be a Python dictionary representing the different information in the input set
'''
return SearchableList(self.retrieve_json('/inputSets'))
def update_input_set(self, name, input_set):
'''
Modify the input set and send to Source.
name: str, name of input set
input_set: A Python dictionary representing the updated input set. Should contain the same fields as the input set
returned from the input_sets method.
'''
return self.send_json('/inputSets/%s' % (name.replace(' ', '%20')), method='PUT', data=input_set)
def create_input_set(self, input_set):
'''
Create a new input set in Source model.
input_set: A Python dictionary representing the updated input set. Should contain the same fields as the input set
returned from the input_sets method. (eg Configuration,Filename,Name,ReloadOnRun)
'''
return self.post_json('/inputSets', data=input_set)
def apply_input_set(self, name):
'''
Have Source apply a given input set
'''
return self.send('/inputSets/%s/run' % (name.replace('%', '%25').replace(' ', '%20')), 'POST')
def timeseries_suffix(self,timestep='daily'):
if timestep == "daily":
return ""
return "/aggregated/%s" % timestep
def retrieve_multiple_time_series(self, run='latest', run_data=None, criteria={}, timestep='daily', name_fn=name_element_variable):
"""
Retrieve multiple time series from a run according to some criteria.
Return all time series in a single Pandas DataFrame with date time index.
you can an index of run results via run_data. If you don't the method will first retrieve an
index based on the value of the run parameter (default='latest')
criteria should be regexps for the fields in a Veneer time series record:
* NetworkElement
* RecordingElement
* RecordingVariable
* TimeSeriesName
* TimeSeriesUrl
* FunctionalUnit
These criteria are used to identify which time series to retrieve.
timestep should be one of 'daily' (default), 'monthly', 'annual'.
*WARNING*: The monthly and annual option uses the corresponding option in the Veneer plugin, which ALWAYS SUMS values,
regardless of units. So, if you retrieve a rate variable (eg m^3/s) those values will be summed and you will need to
correct this manually in the returned DataFrame.
All retrieved time series are returned in a single Data Frame.
You can specify a function for naming the columns of the Data Frame using name_fn. This function should take
the results summary (from the index) and return a string. Example functions include:
* veneer.name_time_series (uses the full name of the time series, as provided by Source)
* veneer.name_element_variable (DEFAULT: users the name of the network element and the name of the variable)
* veneer.name_for_location (just use the name of the network element)
* veneer.name_for_variable (just use the name of the variable)
"""
suffix = self.timeseries_suffix(timestep)
if run_data is None:
run_data = self.retrieve_run(run)
retrieved = {}
def name_column(result):
col_name = name_fn(result)
if col_name in retrieved:
i = 1
alt_col_name = '%s %d' % (col_name, i)
while alt_col_name in retrieved:
i += 1
alt_col_name = '%s %d' % (col_name, i)
col_name = alt_col_name
return col_name
units_store = {}
for result in run_data['Results']:
if self.result_matches_criteria(result, criteria):
d = self.retrieve_json(result['TimeSeriesUrl'] + suffix)
result.update(d)
col_name = name_column(result)
# raise Exception("Duplicate column name: %s"%col_name)
if 'Events' in d:
retrieved[col_name] = d['Events']
units_store[col_name] = result['Units']
else:
all_ts = d['TimeSeries']
for ts in all_ts:
col_name = name_column(ts)
units_store[col_name] = ts['Units']
vals = ts['Values']
s = self.parse_veneer_date(ts['StartDate'])
e = self.parse_veneer_date(ts['EndDate'])
if ts['TimeStep'] == 'Daily':
f = 'D'
elif ts['TimeStep'] == 'Monthly':
f = 'M'
elif ts['TimeStep'] == 'Annual':
f = 'A'
dates = pd.date_range(s, e, freq=f)
retrieved[col_name] = [
{'Date': d, 'Value': v} for d, v in zip(dates, vals)]
# Multi Time Series!
result = self._create_timeseries_dataframe(retrieved)
for k, u in units_store.items():
result[k].units = u
return result
def summarise_timeseries(self,
column_attr,
run=None,
run_data=None,
criteria={},
timestep='daily',
index_attr=None,
scale=1.0,
renames={},
report_interval=5000):
'''
column_attr: meta attribute (derived from criteria) used to name the columns of dataframes
        run: run label/number whose results should be summarised (used when run_data is not supplied)
        run_data: previously retrieved run index (eg from retrieve_run); if None it is retrieved using `run`
criteria: dict-like object with keys matching retrieval (eg NetworkElement, etc),
but where the values of these search criteria can be regular expressions with named groups.
These named groups are used in summarising the data and attributing the resulting tables
timestep: daily, monthly, annual, mean-monthly, mean-annual or None
index_attr: if timestep is None, index_attr should...
scale: A scaling factor for the data (eg to change units)
renames: A nested dictionary of tags and tag values to rename
        report_interval: number of matching time series between progress messages
'''
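        # Hypothetical usage sketch (the named-group patterns and tag names are
        # illustrative only):
        #   tables = v.summarise_timeseries(
        #       column_attr='constituent',
        #       run='latest',
        #       criteria={'RecordingVariable': '(?P<constituent>.+)@Downstream Flow Mass',
        #                 'NetworkElement': '(?P<catchment>.+)'},
        #       timestep='annual')
        #   for tags, df in tables:
        #       print(tags, df.shape)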
def rename_tags(tags,renames):
result = {}
for k,v in tags.items():
result[k] = v
if not k in renames:
continue
lookup = renames[k]
if not v in lookup:
continue
result[k] = lookup[v]
return result
units_seen = []
suffix = self.timeseries_suffix(timestep or 'annual')
count = 0
if run_data is None:
run_data = self.retrieve_run(run)
criteria = [(k,re.compile(v, re.IGNORECASE)) for k,v in criteria.items()]
tag_order = None
summaries = {}
for ix,result in enumerate(run_data['Results']):
matching = True
tags = {}
for criteria_key, criteria_pattern in criteria:
match = criteria_pattern.match(result[criteria_key])
if match is None:
matching = False
break
tags.update(rename_tags(match.groupdict(),renames))
if not matching:
continue
column = tags.pop(column_attr)
if tag_order is None:
tag_order = list(tags.keys())
tag_values = tuple([tags[k] for k in tag_order])
if not tag_values in summaries:
summaries[tag_values] = {}
table = summaries[tag_values]
data = self.retrieve_json(result['TimeSeriesUrl'] + suffix)
assert 'Events' in data
units = data['Units']
if not units in units_seen:
print('Units from %s = %s'%(result['TimeSeriesUrl'],units))
units_seen.append(units)
table[column] = data['Events']
count += 1
if count and (count % report_interval)==0:
print('Match %d, (row %d/%d)'%(count,ix,len(run_data['Results'])),result['TimeSeriesUrl'],'matches',column, tags)
print('Units seen: %s'%(','.join(units_seen),))
return [(dict(zip(tag_order,tags)),self._create_timeseries_dataframe(table)*scale) for tags,table in summaries.items()]
def parse_veneer_date(self, txt):
if hasattr(txt, 'strftime'):
return txt
return pd.datetime.strptime(txt, '%m/%d/%Y %H:%M:%S')
def convert_dates(self, events):
return [{'Date': self.parse_veneer_date(e['Date']), 'Value':e['Value']} for e in events]
def _create_timeseries_dataframe(self, data_dict, common_index=True):
if len(data_dict) == 0:
df = pd.DataFrame()
elif common_index:
index = [self.parse_veneer_date(event['Date'])
for event in list(data_dict.values())[0]]
data = {k: [event['Value'] for event in result]
for k, result in data_dict.items()}
df = pd.DataFrame(data=data, index=index)
else:
from functools import reduce
dataFrames = [pd.DataFrame(self.convert_dates(ts)).set_index(
'Date').rename(columns={'Value': k}) for k, ts in data_dict.items()]
df = reduce(lambda l, r: l.join(r, how='outer'), dataFrames)
extensions._apply_time_series_helpers(df)
return df
def _extend_network(nw):
nw = objdict(nw)
nw['features'] = SearchableList(
nw['features'], ['geometry', 'properties'])
extensions.add_network_methods(nw)
return nw
def read_sdt(fn):
ts = pd.read_table(fn, delim_whitespace=True, engine='python',
names=['Year', 'Month', 'Day', 'Val'])
ts['Date'] = ts.apply(lambda row: pd.datetime(
int(row.Year), int(row.Month), int(row.Day)), axis=1)
ts = ts.set_index(ts.Date)
return ts.Val
def to_source_date(the_date):
if hasattr(the_date, 'strftime'):
return the_date.strftime('%d/%m/%Y')
return the_date
def read_rescsv(fn,header_attributes=['WaterFeatureType','Site','Structure']):
'''
Read a .res.csv file saved from Source
Returns
* attributes - Pandas Dataframe of the various metadata attributes in the file
* data - Pandas dataframe of the time series
'''
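    # Hypothetical usage sketch (the filename is illustrative only):
    #   attributes, data = read_rescsv('Scenario 1.res.csv')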
import pandas as pd
import re
import io
text = open(fn, 'r').read()
r = re.compile('\nEOH\n')
header, body = r.split(text)
r = re.compile('\nEOC\n')
config, headers = r.split(header)
attribute_names = config.splitlines()[-1].split(',')
attributes = pd.DataFrame(
[dict(zip(attribute_names, line.split(','))) for line in headers.splitlines()[1:-1]])
columns = attributes[header_attributes[0]].copy()
for ha in header_attributes[1:]:
columns += ': ' + attributes[ha]
columns = ['Date'] + list(columns)
data = pd.read_csv(io.StringIO(body), header=None, index_col=0,
parse_dates=True, dayfirst=True, names=columns)
return attributes, data
def expand_run_results_metadata(run,network):
'''
Use to add additional metadata to a run results set. Useful for making more specific queries to
v.retrieve_multiple_time_series
Currently:
* Expands NetworkElement column to add 'feature_type' and 'node_type'
'''
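    # Hypothetical usage sketch:
    #   run_summary = expand_run_results_metadata(v.retrieve_run(), v.network())
    #   results_df = pd.DataFrame(run_summary['Results'])  # now includes feature_type and node_type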
features = network['features'].as_dataframe()
features['node_type'] = features['icon'].str.split('/',expand=True)[2]
features = features[['name','feature_type','node_type']]
results = run['Results'].as_dataframe()
merged = pd.merge(results,features,how='left',left_on='NetworkElement',right_on='name')
run = run.copy()
run['Results'] = merged.to_dict(orient='records')
return run
if __name__ == '__main__':
# Output
    import sys
    from .bulk import VeneerRetriever
destination = sys.argv[1] if len(
sys.argv) > 1 else "C:\\temp\\veneer_download\\"
print("Downloading all Veneer data to %s" % destination)
retriever = VeneerRetriever(destination)
retriever.retrieve_all(destination)
| isc |
phoebe-project/phoebe2 | tests/nosetests/test_dynamics/test_dynamics_grid.py | 1 | 9061 | """
"""
import phoebe
from phoebe import u
import numpy as np
import matplotlib.pyplot as plt
def _keplerian_v_nbody(b, ltte, period, plot=False):
"""
test a single bundle for the phoebe backend's kepler vs nbody dynamics methods
"""
# TODO: loop over ltte=True,False (once keplerian dynamics supports the switch)
b.set_value('dynamics_method', 'bs')
times = np.linspace(0, 5*period, 101)
nb_ts, nb_us, nb_vs, nb_ws, nb_vus, nb_vvs, nb_vws = phoebe.dynamics.nbody.dynamics_from_bundle(b, times, ltte=ltte)
k_ts, k_us, k_vs, k_ws, k_vus, k_vvs, k_vws = phoebe.dynamics.keplerian.dynamics_from_bundle(b, times, ltte=ltte)
assert(np.allclose(nb_ts, k_ts, 1e-8))
for ci in range(len(b.hierarchy.get_stars())):
# TODO: make rtol lower if possible
assert(np.allclose(nb_us[ci], k_us[ci], rtol=1e-5, atol=1e-2))
assert(np.allclose(nb_vs[ci], k_vs[ci], rtol=1e-5, atol=1e-2))
assert(np.allclose(nb_ws[ci], k_ws[ci], rtol=1e-5, atol=1e-2))
# nbody ltte velocities are wrong so only check velocities if ltte off
if not ltte:
assert(np.allclose(nb_vus[ci], k_vus[ci], rtol=1e-5, atol=1e-2))
assert(np.allclose(nb_vvs[ci], k_vvs[ci], rtol=1e-5, atol=1e-2))
assert(np.allclose(nb_vws[ci], k_vws[ci], rtol=1e-5, atol=1e-2))
def _phoebe_v_photodynam(b, period, plot=False):
"""
test a single bundle for phoebe's nbody vs photodynam via the frontend
"""
times = np.linspace(0, 5*period, 21)
b.add_dataset('orb', times=times, dataset='orb01', component=b.hierarchy.get_stars())
# photodynam and phoebe should have the same nbody defaults... if for some reason that changes,
# then this will probably fail
b.add_compute('photodynam', compute='pdcompute')
# photodynam backend ONLY works with ltte=True, so we will run the phoebe backend with that as well
# TODO: remove distortion_method='nbody' once that is supported
b.set_value('dynamics_method', 'bs')
b.set_value('ltte', True)
b.run_compute('pdcompute', model='pdresults')
b.run_compute('phoebe01', model='phoeberesults')
for comp in b.hierarchy.get_stars():
# TODO: check to see how low we can make atol (or change to rtol?)
# TODO: look into justification of flipping x and y for both dynamics (photodynam & phoebe)
# TODO: why the small discrepancy (visible especially in y, still <1e-11) - possibly a difference in time0 or just a precision limit in the photodynam backend since loading from a file??
if plot:
for k in ['us', 'vs', 'ws', 'vus', 'vvs', 'vws']:
plt.cla()
plt.plot(b.get_value('times', model='phoeberesults', component=comp, unit=u.d), b.get_value(k, model='phoeberesults', component=comp), 'r-')
plt.plot(b.get_value('times', model='phoeberesults', component=comp, unit=u.d), b.get_value(k, model='pdresults', component=comp), 'b-')
diff = abs(b.get_value(k, model='phoeberesults', component=comp) - b.get_value(k, model='pdresults', component=comp))
print("*** max abs: {}".format(max(diff)))
plt.xlabel('t')
plt.ylabel(k)
plt.show()
assert(np.allclose(b.get_value('times', model='phoeberesults', component=comp, unit=u.d), b.get_value('times', model='pdresults', component=comp, unit=u.d), rtol=0, atol=1e-05))
assert(np.allclose(b.get_value('us', model='phoeberesults', component=comp, unit=u.AU), b.get_value('us', model='pdresults', component=comp, unit=u.AU), rtol=0, atol=1e-05))
assert(np.allclose(b.get_value('vs', model='phoeberesults', component=comp, unit=u.AU), b.get_value('vs', model='pdresults', component=comp, unit=u.AU), rtol=0, atol=1e-05))
assert(np.allclose(b.get_value('ws', model='phoeberesults', component=comp, unit=u.AU), b.get_value('ws', model='pdresults', component=comp, unit=u.AU), rtol=0, atol=1e-05))
#assert(np.allclose(b.get_value('vxs', model='phoeberesults', component=comp, unit=u.solRad/u.d), b.get_value('vxs', model='pdresults', component=comp, unit=u.solRad/u.d), rtol=0, atol=1e-05))
#assert(np.allclose(b.get_value('vys', model='phoeberesults', component=comp, unit=u.solRad/u.d), b.get_value('vys', model='pdresults', component=comp, unit=u.solRad/u.d), rtol=0, atol=1e-05))
#assert(np.allclose(b.get_value('vzs', model='phoeberesults', component=comp, unit=u.solRad/u.d), b.get_value('vzs', model='pdresults', component=comp, unit=u.solRad/u.d), rtol=0, atol=1e-05))
def _frontend_v_backend(b, ltte, period, plot=False):
"""
test a single bundle for the frontend vs backend access to both kepler and nbody dynamics
"""
# TODO: loop over ltte=True,False
times = np.linspace(0, 5*period, 101)
b.add_dataset('orb', times=times, dataset='orb01', component=b.hierarchy.get_stars())
b.rename_compute('phoebe01', 'nbody')
b.set_value('dynamics_method', 'bs')
b.set_value('ltte', ltte)
b.add_compute('phoebe', dynamics_method='keplerian', compute='keplerian', ltte=ltte)
# NBODY
# do backend Nbody
b_ts, b_us, b_vs, b_ws, b_vus, b_vvs, b_vws = phoebe.dynamics.nbody.dynamics_from_bundle(b, times, compute='nbody', ltte=ltte)
# do frontend Nbody
b.run_compute('nbody', model='nbodyresults')
for ci,comp in enumerate(b.hierarchy.get_stars()):
# TODO: can we lower tolerance?
assert(np.allclose(b.get_value('times', model='nbodyresults', component=comp, unit=u.d), b_ts, rtol=0, atol=1e-6))
assert(np.allclose(b.get_value('us', model='nbodyresults', component=comp, unit=u.solRad), b_us[ci], rtol=1e-7, atol=1e-4))
assert(np.allclose(b.get_value('vs', model='nbodyresults', component=comp, unit=u.solRad), b_vs[ci], rtol=1e-7, atol=1e-4))
assert(np.allclose(b.get_value('ws', model='nbodyresults', component=comp, unit=u.solRad), b_ws[ci], rtol=1e-7, atol=1e-4))
if not ltte:
assert(np.allclose(b.get_value('vus', model='nbodyresults', component=comp, unit=u.solRad/u.d), b_vus[ci], rtol=1e-7, atol=1e-4))
assert(np.allclose(b.get_value('vvs', model='nbodyresults', component=comp, unit=u.solRad/u.d), b_vvs[ci], rtol=1e-7, atol=1e-4))
assert(np.allclose(b.get_value('vws', model='nbodyresults', component=comp, unit=u.solRad/u.d), b_vws[ci], rtol=1e-7, atol=1e-4))
# KEPLERIAN
# do backend keplerian
b_ts, b_us, b_vs, b_ws, b_vus, b_vvs, b_vws = phoebe.dynamics.keplerian.dynamics_from_bundle(b, times, compute='keplerian', ltte=ltte)
# do frontend keplerian
b.run_compute('keplerian', model='keplerianresults')
for ci,comp in enumerate(b.hierarchy.get_stars()):
# TODO: can we lower tolerance?
assert(np.allclose(b.get_value('times', model='keplerianresults', component=comp, unit=u.d), b_ts, rtol=0, atol=1e-08))
assert(np.allclose(b.get_value('us', model='keplerianresults', component=comp, unit=u.solRad), b_us[ci], rtol=0, atol=1e-08))
assert(np.allclose(b.get_value('vs', model='keplerianresults', component=comp, unit=u.solRad), b_vs[ci], rtol=0, atol=1e-08))
assert(np.allclose(b.get_value('ws', model='keplerianresults', component=comp, unit=u.solRad), b_ws[ci], rtol=0, atol=1e-08))
assert(np.allclose(b.get_value('vus', model='keplerianresults', component=comp, unit=u.solRad/u.d), b_vus[ci], rtol=0, atol=1e-08))
assert(np.allclose(b.get_value('vvs', model='keplerianresults', component=comp, unit=u.solRad/u.d), b_vvs[ci], rtol=0, atol=1e-08))
assert(np.allclose(b.get_value('vws', model='keplerianresults', component=comp, unit=u.solRad/u.d), b_vws[ci], rtol=0, atol=1e-08))
def test_binary(plot=False):
"""
"""
phoebe.devel_on() # required for nbody dynamics
# TODO: once ps.copy is implemented, just send b.copy() to each of these
# system = [sma (AU), period (d)]
system1 = [0.05, 2.575]
system2 = [1., 257.5]
system3 = [40., 65000.]
for system in [system1,system2,system3]:
for q in [0.5,1.]:
for ltte in [True, False]:
print("test_dynamics_grid: sma={}, period={}, q={}, ltte={}".format(system[0], system[1], q, ltte))
b = phoebe.default_binary()
b.get_parameter('dynamics_method')._choices = ['keplerian', 'bs']
b.set_default_unit_all('sma', u.AU)
b.set_default_unit_all('period', u.d)
b.set_value('sma@binary',system[0])
b.set_value('period@binary', system[1])
b.set_value('q', q)
_keplerian_v_nbody(b, ltte, system[1], plot=plot)
_frontend_v_backend(b, ltte, system[1], plot=plot)
phoebe.devel_off() # reset for future tests
if __name__ == '__main__':
logger = phoebe.logger(clevel='INFO')
test_binary(plot=True)
# TODO: create tests for both triple configurations (A--B-C, A-B--C) - these should first be default bundles
| gpl-3.0 |
ChanChiChoi/scikit-learn | examples/applications/plot_stock_market.py | 227 | 8284 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to fluctuate together during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is, a list of connections. For each
symbol, the symbols that it is connected to are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in scikit-learn, we use :ref:`affinity_propagation` as it does not
enforce equal-size clusters and can choose the number of clusters
automatically from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve a 2D
embedding.
Visualization
-------------
The output of the 3 models is combined in a 2D graph where the nodes
represent the stocks and the edges:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plane
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux [email protected]
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import finance
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
'LMT': 'Lookheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
| bsd-3-clause |
elijah513/scikit-learn | examples/cluster/plot_kmeans_digits.py | 230 | 4524 | """
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand    full name
=========== ========================================================
homo         homogeneity score
compl        completeness score
v-meas       V measure
ARI          adjusted Rand index
AMI          adjusted mutual information
silhouette   silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02 # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each point in the mesh.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
LS80/FFL | FFL/teams/views.py | 1 | 10427 | from datetime import timedelta
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.db.models import Sum
from django.core.urlresolvers import reverse
from django.core.mail import EmailMessage
from django.template.loader import render_to_string
from numpy import array, reshape, mean, cumsum
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib import rc
from FFL.teams.models import Team, SquadMember, WeekPoints
from FFL.teams.forms import GraphForm, SelectTeamForm
from FFL.game.formation import Formation
from FFL.game import config
from FFL.game import gameinfo
rc('font', size=8)
rc('legend', fontsize=8)
def week_from(week=None):
if week:
return max(1, int(week))
else:
return 1
def week_to(week=None):
latest = WeekPoints.objects.latest_week()
if week:
return min(latest, int(week))
else:
return latest
def league(request):
return render(request, 'league.html',
{'week': gameinfo.gameWeek(),
'teams': Team.objects.order_by('-total_points','id')})
def joke_league(request):
week = WeekPoints.objects.latest_week()
teams = list(Team.joke.set_league_positions(week, save=False))
return render(request, 'league.html',
{'type': "Joke ", 'week': week, 'teams': teams})
def weekly_performance(request, from_week=None, to_week=None):
week = gameinfo.gameWeek()
if week:
MAX_WEEKS = 20
to_week = week_to(to_week)
from_week = max(week_from(from_week), (to_week - MAX_WEEKS + 1))
if from_week <= to_week:
nweeks = to_week - from_week + 1
weeknums = range(from_week, to_week + 1)
table = WeekPoints.objects.table(from_week, to_week)
return render(request, 'weeklyperformance.html',
{'ncols': nweeks + 1, 'weeknums': weeknums, 'points': table,
'prev': from_week != 1,
'next': to_week != WeekPoints.objects.latest_week()})
else:
raise Http404()
else:
return render(request, 'weeklyperformance.html', {'week': week})
def team(request, manager, week=None):
manager = manager.replace('-', ' ')
try:
team = Team.objects.get(manager__iexact=manager)
except Team.DoesNotExist:
raise Http404()
latest_week = WeekPoints.objects.latest_week()
if week is None:
week = latest_week
else:
week = int(week)
if not 1 <= week <= latest_week + 1:
raise Http404()
league = list(Team.objects.all())
index = league.index(team)
n = len(league)
team_links = (league[(index - 1) % n], league[(index + 1) % n])
week_links = (week > 1, week < latest_week + 1)
players = team.squad.in_team(week)
subs = team.squad.subs(week)
for p in players:
p.week_points = p.player.points(week)
date_from = gameinfo.weekToDate(week)
date_to = date_from + timedelta(days=6)
return render(request, 'team.html',
{'team': team, 'players': players, 'subs': subs, 'latest_week': latest_week,
'week': week, 'date_from': date_from, 'date_to': date_to,
'week_links': week_links, 'team_links': team_links})
def team_wp(request, manager):
manager = manager.replace('-', ' ')
try:
team = Team.objects.get(manager__iexact=manager)
except Team.DoesNotExist:
raise Http404()
week = WeekPoints.objects.latest_week()
points = team.weekly_points.values_list('points', flat=True)
weeks = range(1, week + 1)
teams = list(WeekPoints.objects.values_list('points', flat=True))
shape = (Team.objects.count(), week)
avgs = mean(reshape(array(teams), shape), axis=0)
fig = Figure(figsize=(7, 3), dpi=100, facecolor='white')
ax = fig.add_subplot(111)
rects = ax.bar(weeks, points, align='center', linewidth=1, color='#008ad1', width=1)
ax.set_xlabel("Week")
ax.set_ylabel("Points")
ax.set_xticks(weeks) # add a tick for every week
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2., height / 2., str(height),
fontsize=10, color="#ffffff", ha='center')
ax.set_xlim((0.5, max(10, week) + 0.5))
ax.plot(weeks, avgs, color='blue', marker='*', label='Week average score')
ax.legend(markerscale=0, handlelength=3)
canvas = FigureCanvas(fig)
response = HttpResponse(content_type='image/png')
canvas.print_png(response)
return response
def graph(request, teams=None, from_week=None, to_week=None):
week = gameinfo.gameWeek()
if week:
if request.GET:
# form has been submitted - redirect to the full url.
form = GraphForm(request.GET)
if form.is_valid():
teams = '-'.join([str(team.id) for team in form.cleaned_data['team']])
from_week = week_from(form.cleaned_data['from_week'])
to_week = week_to(form.cleaned_data['to_week'])
return HttpResponseRedirect(reverse(graph,
args=[teams, from_week, to_week]))
else:
if teams is None:
# we are at the basic url so render an empty form.
form = GraphForm()
else:
# we are at the full url - add the values into the form and render the page.
try:
ids = map(int, teams.split('-'))
except ValueError:
raise Http404()
for team_id in ids:
if not Team.objects.filter(id=team_id):
raise Http404()
from_week = week_from(from_week)
to_week = week_to(to_week)
form = GraphForm(initial={'team': Team.objects.filter(id__in=ids),
'from_week': from_week,
'to_week': to_week})
return render(request, 'graph.html',
{'form': form, 'teams': teams,
'from_week': from_week,
'to_week': to_week})
else:
# the game has yet to start so there is nothing to graph.
return render(request, 'graph.html', {'week': week})
def graph_plot_png(f, teams, from_week=None, to_week=None):
to_week = week_to(to_week)
from_week = week_from(from_week)
weeks = range(from_week, to_week + 1)
fig = Figure(figsize=(8, 4), dpi=100, facecolor='white')
ax = fig.add_subplot(111)
for team_id in teams:
team = Team.objects.get(id=team_id)
points = team.weekly_points.filter(week__in=weeks)
if not points:
raise Http404()
if from_week > 1:
prev_points = (team.weekly_points.filter(week__lt=from_week)
.aggregate(Sum('points'))['points__sum'])
else:
prev_points = 0
cum_points = cumsum(points.values_list('points', flat=True)) + prev_points
ax.plot(weeks, cum_points, linewidth=1,
label="{0} ({1})".format(team.manager, team.total_points))
ax.legend(bbox_to_anchor=(1.02, 1), markerscale=0, handlelength=3, loc='upper left')
ax.set_xlabel("Week")
ax.set_ylabel("Points")
ax.set_xticks(weeks) # add a tick for every week
ax.set_xlim(from_week, to_week)
fig.subplots_adjust(left=0.06, right=0.70, top=0.95, bottom=0.08)
canvas = FigureCanvas(fig)
canvas.print_png(f)
return f
def graph_plot(request, teams, from_week, to_week):
response = HttpResponse(content_type='image/png')
return graph_plot_png(response, map(int, teams.split('-')), from_week, to_week)
def formations_pie(request):
fig = Figure(figsize=(6, 6), dpi=100, facecolor='white')
ax = fig.add_subplot(111)
formations = list(Team.objects.values_list('formation', flat=True))
counts = [formations.count(f) for f in Formation.formations]
ax.pie(counts, labels=Formation.formations, autopct='%1.1f%%')
canvas = FigureCanvas(fig)
response = HttpResponse(content_type='image/png')
canvas.print_png(response)
return response
def team_submitted(request):
return render(request, 'team_submitted.html')
def submit_team(request):
if request.method == 'POST':
form = SelectTeamForm(request.POST)
if form.is_valid():
team = form.save()
for field in Formation.fields:
for player in form.cleaned_data[field]:
SquadMember.objects.create(team=team,
player=player,
week_in=gameinfo.gameWeek() + 1)
week = WeekPoints.objects.latest_week()
if week is not None:
for w in range(1, week + 1):
WeekPoints.objects.create(team=team, week=w)
html = render_to_string('team_email.html', {'team': team})
email = EmailMessage('Your Team', html, '[email protected]',
to=[team.email],
bcc=['[email protected]'],
headers={'Reply-To': '[email protected]',
'From': 'Fantasy Football'})
email.content_subtype = 'html'
email.send()
return HttpResponseRedirect(reverse(team_submitted))
else:
form = SelectTeamForm()
return render(request, 'submit_team.html',
{'form': form,
'budget': config.MAX_COST,
'week': gameinfo.gameWeek() + 1})
| mit |
lhilt/scipy | scipy/optimize/zeros.py | 3 | 50128 | from __future__ import division, print_function, absolute_import
import warnings
from collections import namedtuple
from . import _zeros
import numpy as np
_iter = 100
_xtol = 2e-12
_rtol = 4 * np.finfo(float).eps
__all__ = ['newton', 'bisect', 'ridder', 'brentq', 'brenth', 'toms748',
'RootResults']
# Must agree with CONVERGED, SIGNERR, CONVERR, ... in zeros.h
_ECONVERGED = 0
_ESIGNERR = -1
_ECONVERR = -2
_EVALUEERR = -3
_EINPROGRESS = 1
CONVERGED = 'converged'
SIGNERR = 'sign error'
CONVERR = 'convergence error'
VALUEERR = 'value error'
INPROGRESS = 'No error'
flag_map = {_ECONVERGED: CONVERGED, _ESIGNERR: SIGNERR, _ECONVERR: CONVERR,
_EVALUEERR: VALUEERR, _EINPROGRESS: INPROGRESS}
class RootResults(object):
"""Represents the root finding result.
Attributes
----------
root : float
Estimated root location.
iterations : int
Number of iterations needed to find the root.
function_calls : int
Number of times the function was called.
converged : bool
True if the routine converged.
flag : str
Description of the cause of termination.
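Examples
--------
A minimal illustration: any of the bracketing solvers returns a
`RootResults` instance when called with ``full_output=True``.
>>> from scipy import optimize
>>> x0, r = optimize.brentq(lambda x: x**2 - 1, 0, 2, full_output=True)
>>> r.converged
True
>>> r.root
1.0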
"""
def __init__(self, root, iterations, function_calls, flag):
self.root = root
self.iterations = iterations
self.function_calls = function_calls
self.converged = flag == _ECONVERGED
self.flag = None
try:
self.flag = flag_map[flag]
except KeyError:
self.flag = 'unknown error %d' % (flag,)
def __repr__(self):
attrs = ['converged', 'flag', 'function_calls',
'iterations', 'root']
m = max(map(len, attrs)) + 1
return '\n'.join([a.rjust(m) + ': ' + repr(getattr(self, a))
for a in attrs])
def results_c(full_output, r):
if full_output:
x, funcalls, iterations, flag = r
results = RootResults(root=x,
iterations=iterations,
function_calls=funcalls,
flag=flag)
return x, results
else:
return r
def _results_select(full_output, r):
"""Select from a tuple of (root, funccalls, iterations, flag)"""
x, funcalls, iterations, flag = r
if full_output:
results = RootResults(root=x,
iterations=iterations,
function_calls=funcalls,
flag=flag)
return x, results
return x
def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50,
fprime2=None, x1=None, rtol=0.0,
full_output=False, disp=True):
"""
Find a zero of a real or complex function using the Newton-Raphson
(or secant or Halley's) method.
Find a zero of the function `func` given a nearby starting point `x0`.
The Newton-Raphson method is used if the derivative `fprime` of `func`
is provided, otherwise the secant method is used. If the second order
derivative `fprime2` of `func` is also provided, then Halley's method is
used.
If `x0` is a sequence with more than one item, then `newton` returns an
array, and `func` must be vectorized and return a sequence or array of the
same shape as its first argument. If `fprime` or `fprime2` is given then
its return must also have the same shape.
Parameters
----------
func : callable
The function whose zero is wanted. It must be a function of a
single variable of the form ``f(x,a,b,c...)``, where ``a,b,c...``
are extra arguments that can be passed in the `args` parameter.
x0 : float, sequence, or ndarray
An initial estimate of the zero that should be somewhere near the
actual zero. If not scalar, then `func` must be vectorized and return
a sequence or array of the same shape as its first argument.
fprime : callable, optional
The derivative of the function when available and convenient. If it
is None (default), then the secant method is used.
args : tuple, optional
Extra arguments to be used in the function call.
tol : float, optional
The allowable error of the zero value. If `func` is complex-valued,
a larger `tol` is recommended as both the real and imaginary parts
of `x` contribute to ``|x - x0|``.
maxiter : int, optional
Maximum number of iterations.
fprime2 : callable, optional
The second order derivative of the function when available and
convenient. If it is None (default), then the normal Newton-Raphson
or the secant method is used. If it is not None, then Halley's method
is used.
x1 : float, optional
Another estimate of the zero that should be somewhere near the
actual zero. Used if `fprime` is not provided.
rtol : float, optional
Tolerance (relative) for termination.
full_output : bool, optional
If `full_output` is False (default), the root is returned.
If True and `x0` is scalar, the return value is ``(x, r)``, where ``x``
is the root and ``r`` is a `RootResults` object.
If True and `x0` is non-scalar, the return value is ``(x, converged,
zero_der)`` (see Returns section for details).
disp : bool, optional
If True, raise a RuntimeError if the algorithm didn't converge, with
the error message containing the number of iterations and current
function value. Otherwise the convergence status is recorded in a
`RootResults` return object.
Ignored if `x0` is not scalar.
*Note: this has little to do with displaying, however
the `disp` keyword cannot be renamed for backwards compatibility.*
Returns
-------
root : float, sequence, or ndarray
Estimated location where function is zero.
r : `RootResults`, optional
Present if ``full_output=True`` and `x0` is scalar.
Object containing information about the convergence. In particular,
``r.converged`` is True if the routine converged.
converged : ndarray of bool, optional
Present if ``full_output=True`` and `x0` is non-scalar.
For vector functions, indicates which elements converged successfully.
zero_der : ndarray of bool, optional
Present if ``full_output=True`` and `x0` is non-scalar.
For vector functions, indicates which elements had a zero derivative.
See Also
--------
brentq, brenth, ridder, bisect
fsolve : find zeros in n dimensions.
Notes
-----
The convergence rate of the Newton-Raphson method is quadratic,
the Halley method is cubic, and the secant method is
sub-quadratic. This means that if the function is well behaved
the actual error in the estimated zero after the n-th iteration
is approximately the square (cube for Halley) of the error
after the (n-1)-th step. However, the stopping criterion used
here is the step size and there is no guarantee that a zero
has been found. Consequently the result should be verified.
Safer algorithms are brentq, brenth, ridder, and bisect,
but they all require that the root first be bracketed in an
interval where the function changes sign. The brentq algorithm
is recommended for general use in one dimensional problems
when such an interval has been found.
When `newton` is used with arrays, it is best suited for the following
types of problems:
* The initial guesses, `x0`, are all relatively the same distance from
the roots.
* Some or all of the extra arguments, `args`, are also arrays so that a
class of similar problems can be solved together.
* The size of the initial guesses, `x0`, is larger than O(100) elements.
Otherwise, a naive loop may perform as well or better than a vector.
Examples
--------
>>> from scipy import optimize
>>> import matplotlib.pyplot as plt
>>> def f(x):
... return (x**3 - 1) # only one real root at x = 1
``fprime`` is not provided, use the secant method:
>>> root = optimize.newton(f, 1.5)
>>> root
1.0000000000000016
>>> root = optimize.newton(f, 1.5, fprime2=lambda x: 6 * x)
>>> root
1.0000000000000016
Only ``fprime`` is provided, use the Newton-Raphson method:
>>> root = optimize.newton(f, 1.5, fprime=lambda x: 3 * x**2)
>>> root
1.0
Both ``fprime2`` and ``fprime`` are provided, use Halley's method:
>>> root = optimize.newton(f, 1.5, fprime=lambda x: 3 * x**2,
... fprime2=lambda x: 6 * x)
>>> root
1.0
When we want to find zeros for a set of related starting values and/or
function parameters, we can provide both of those as an array of inputs:
>>> f = lambda x, a: x**3 - a
>>> fder = lambda x, a: 3 * x**2
>>> np.random.seed(4321)
>>> x = np.random.randn(100)
>>> a = np.arange(-50, 50)
>>> vec_res = optimize.newton(f, x, fprime=fder, args=(a, ))
The above is the equivalent of solving for each value in ``(x, a)``
separately in a for-loop, just faster:
>>> loop_res = [optimize.newton(f, x0, fprime=fder, args=(a0,))
... for x0, a0 in zip(x, a)]
>>> np.allclose(vec_res, loop_res)
True
Plot the results found for all values of ``a``:
>>> analytical_result = np.sign(a) * np.abs(a)**(1/3)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(a, analytical_result, 'o')
>>> ax.plot(a, vec_res, '.')
>>> ax.set_xlabel('$a$')
>>> ax.set_ylabel('$x$ where $f(x, a)=0$')
>>> plt.show()
"""
if tol <= 0:
raise ValueError("tol too small (%g <= 0)" % tol)
if maxiter < 1:
raise ValueError("maxiter must be greater than 0")
if np.size(x0) > 1:
return _array_newton(func, x0, fprime, args, tol, maxiter, fprime2,
full_output)
# Convert to float (don't use float(x0); this works also for complex x0)
p0 = 1.0 * x0
funcalls = 0
if fprime is not None:
# Newton-Raphson method
for itr in range(maxiter):
# first evaluate fval
fval = func(p0, *args)
funcalls += 1
# If fval is 0, a root has been found, then terminate
if fval == 0:
return _results_select(
full_output, (p0, funcalls, itr, _ECONVERGED))
fder = fprime(p0, *args)
funcalls += 1
if fder == 0:
msg = "Derivative was zero."
if disp:
msg += (
" Failed to converge after %d iterations, value is %s."
% (itr + 1, p0))
raise RuntimeError(msg)
warnings.warn(msg, RuntimeWarning)
return _results_select(
full_output, (p0, funcalls, itr + 1, _ECONVERR))
newton_step = fval / fder
if fprime2:
fder2 = fprime2(p0, *args)
funcalls += 1
# Halley's method:
# newton_step /= (1.0 - 0.5 * newton_step * fder2 / fder)
# Only do it if denominator stays close enough to 1
# Rationale: If 1-adj < 0, then Halley sends x in the
# opposite direction to Newton. Doesn't happen if x is close
# enough to root.
adj = newton_step * fder2 / fder / 2
if np.abs(adj) < 1:
newton_step /= 1.0 - adj
p = p0 - newton_step
if np.isclose(p, p0, rtol=rtol, atol=tol):
return _results_select(
full_output, (p, funcalls, itr + 1, _ECONVERGED))
p0 = p
else:
# Secant method
if x1 is not None:
if x1 == x0:
raise ValueError("x1 and x0 must be different")
p1 = x1
else:
eps = 1e-4
p1 = x0 * (1 + eps)
p1 += (eps if p1 >= 0 else -eps)
q0 = func(p0, *args)
funcalls += 1
q1 = func(p1, *args)
funcalls += 1
if abs(q1) < abs(q0):
p0, p1, q0, q1 = p1, p0, q1, q0
for itr in range(maxiter):
if q1 == q0:
if p1 != p0:
msg = "Tolerance of %s reached." % (p1 - p0)
if disp:
msg += (
" Failed to converge after %d iterations, value is %s."
% (itr + 1, p1))
raise RuntimeError(msg)
warnings.warn(msg, RuntimeWarning)
p = (p1 + p0) / 2.0
return _results_select(
full_output, (p, funcalls, itr + 1, _ECONVERGED))
else:
if abs(q1) > abs(q0):
p = (-q0 / q1 * p1 + p0) / (1 - q0 / q1)
else:
p = (-q1 / q0 * p0 + p1) / (1 - q1 / q0)
if np.isclose(p, p1, rtol=rtol, atol=tol):
return _results_select(
full_output, (p, funcalls, itr + 1, _ECONVERGED))
p0, q0 = p1, q1
p1 = p
q1 = func(p1, *args)
funcalls += 1
if disp:
msg = ("Failed to converge after %d iterations, value is %s."
% (itr + 1, p))
raise RuntimeError(msg)
return _results_select(full_output, (p, funcalls, itr + 1, _ECONVERR))
def _array_newton(func, x0, fprime, args, tol, maxiter, fprime2, full_output):
"""
A vectorized version of Newton, Halley, and secant methods for arrays.
Do not use this method directly. This method is called from `newton`
when ``np.size(x0) > 1`` is ``True``. For docstring, see `newton`.
"""
# Explicitly copy `x0` as `p` will be modified inplace, but, the
# user's array should not be altered.
try:
p = np.array(x0, copy=True, dtype=float)
except TypeError:
# can't convert complex to float
p = np.array(x0, copy=True)
failures = np.ones_like(p, dtype=bool)
nz_der = np.ones_like(failures)
if fprime is not None:
# Newton-Raphson method
for iteration in range(maxiter):
# first evaluate fval
fval = np.asarray(func(p, *args))
# If all fval are 0, all roots have been found, then terminate
if not fval.any():
failures = fval.astype(bool)
break
fder = np.asarray(fprime(p, *args))
nz_der = (fder != 0)
# stop iterating if all derivatives are zero
if not nz_der.any():
break
# Newton step
dp = fval[nz_der] / fder[nz_der]
if fprime2 is not None:
fder2 = np.asarray(fprime2(p, *args))
dp = dp / (1.0 - 0.5 * dp * fder2[nz_der] / fder[nz_der])
# only update nonzero derivatives
p[nz_der] -= dp
failures[nz_der] = np.abs(dp) >= tol # items not yet converged
# stop iterating if there aren't any failures, not incl zero der
if not failures[nz_der].any():
break
else:
# Secant method
dx = np.finfo(float).eps**0.33
p1 = p * (1 + dx) + np.where(p >= 0, dx, -dx)
q0 = np.asarray(func(p, *args))
q1 = np.asarray(func(p1, *args))
active = np.ones_like(p, dtype=bool)
for iteration in range(maxiter):
nz_der = (q1 != q0)
# stop iterating if all derivatives are zero
if not nz_der.any():
p = (p1 + p) / 2.0
break
# Secant Step
dp = (q1 * (p1 - p))[nz_der] / (q1 - q0)[nz_der]
# only update nonzero derivatives
p[nz_der] = p1[nz_der] - dp
active_zero_der = ~nz_der & active
p[active_zero_der] = (p1 + p)[active_zero_der] / 2.0
active &= nz_der # don't assign zero derivatives again
failures[nz_der] = np.abs(dp) >= tol # not yet converged
# stop iterating if there aren't any failures, not incl zero der
if not failures[nz_der].any():
break
p1, p = p, p1
q0 = q1
q1 = np.asarray(func(p1, *args))
zero_der = ~nz_der & failures # don't include converged with zero-ders
if zero_der.any():
# Secant warnings
if fprime is None:
nonzero_dp = (p1 != p)
# non-zero dp, but infinite newton step
zero_der_nz_dp = (zero_der & nonzero_dp)
if zero_der_nz_dp.any():
rms = np.sqrt(
sum((p1[zero_der_nz_dp] - p[zero_der_nz_dp]) ** 2)
)
warnings.warn(
'RMS of {:g} reached'.format(rms), RuntimeWarning)
# Newton or Halley warnings
else:
all_or_some = 'all' if zero_der.all() else 'some'
msg = '{:s} derivatives were zero'.format(all_or_some)
warnings.warn(msg, RuntimeWarning)
elif failures.any():
all_or_some = 'all' if failures.all() else 'some'
msg = '{0:s} failed to converge after {1:d} iterations'.format(
all_or_some, maxiter
)
if failures.all():
raise RuntimeError(msg)
warnings.warn(msg, RuntimeWarning)
if full_output:
result = namedtuple('result', ('root', 'converged', 'zero_der'))
p = result(p, ~failures, zero_der)
return p
def bisect(f, a, b, args=(),
xtol=_xtol, rtol=_rtol, maxiter=_iter,
full_output=False, disp=True):
"""
Find root of a function within an interval using bisection.
Basic bisection routine to find a zero of the function `f` between the
arguments `a` and `b`. `f(a)` and `f(b)` cannot have the same signs.
Slow but sure.
Parameters
----------
f : function
Python function returning a number. `f` must be continuous, and
f(a) and f(b) must have opposite signs.
a : scalar
One end of the bracketing interval [a,b].
b : scalar
The other end of the bracketing interval [a,b].
xtol : number, optional
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter must be nonnegative.
rtol : number, optional
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter cannot be smaller than its default value of
``4*np.finfo(float).eps``.
maxiter : int, optional
if convergence is not achieved in `maxiter` iterations, an error is
raised. Must be >= 0.
args : tuple, optional
containing extra arguments for the function `f`.
`f` is called by ``apply(f, (x)+args)``.
full_output : bool, optional
If `full_output` is False, the root is returned. If `full_output` is
True, the return value is ``(x, r)``, where x is the root, and r is
a `RootResults` object.
disp : bool, optional
If True, raise RuntimeError if the algorithm didn't converge.
Otherwise the convergence status is recorded in a `RootResults`
return object.
Returns
-------
x0 : float
Zero of `f` between `a` and `b`.
r : `RootResults` (present if ``full_output = True``)
Object containing information about the convergence. In particular,
``r.converged`` is True if the routine converged.
Examples
--------
>>> def f(x):
... return (x**2 - 1)
>>> from scipy import optimize
>>> root = optimize.bisect(f, 0, 2)
>>> root
1.0
>>> root = optimize.bisect(f, -2, 0)
>>> root
-1.0
See Also
--------
brentq, brenth, bisect, newton
fixed_point : scalar fixed-point finder
fsolve : n-dimensional root-finding
"""
if not isinstance(args, tuple):
args = (args,)
if xtol <= 0:
raise ValueError("xtol too small (%g <= 0)" % xtol)
if rtol < _rtol:
raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
r = _zeros._bisect(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
return results_c(full_output, r)
def ridder(f, a, b, args=(),
xtol=_xtol, rtol=_rtol, maxiter=_iter,
full_output=False, disp=True):
"""
Find a root of a function in an interval using Ridder's method.
Parameters
----------
f : function
Python function returning a number. f must be continuous, and f(a) and
f(b) must have opposite signs.
a : scalar
One end of the bracketing interval [a,b].
b : scalar
The other end of the bracketing interval [a,b].
xtol : number, optional
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter must be nonnegative.
rtol : number, optional
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter cannot be smaller than its default value of
``4*np.finfo(float).eps``.
maxiter : int, optional
if convergence is not achieved in `maxiter` iterations, an error is
raised. Must be >= 0.
args : tuple, optional
containing extra arguments for the function `f`.
`f` is called by ``apply(f, (x)+args)``.
full_output : bool, optional
If `full_output` is False, the root is returned. If `full_output` is
True, the return value is ``(x, r)``, where `x` is the root, and `r` is
a `RootResults` object.
disp : bool, optional
If True, raise RuntimeError if the algorithm didn't converge.
Otherwise the convergence status is recorded in any `RootResults`
return object.
Returns
-------
x0 : float
Zero of `f` between `a` and `b`.
r : `RootResults` (present if ``full_output = True``)
Object containing information about the convergence.
In particular, ``r.converged`` is True if the routine converged.
See Also
--------
brentq, brenth, bisect, newton : one-dimensional root-finding
fixed_point : scalar fixed-point finder
Notes
-----
Uses [Ridders1979]_ method to find a zero of the function `f` between the
arguments `a` and `b`. Ridders' method is faster than bisection, but not
generally as fast as the Brent routines. [Ridders1979]_ provides the
classic description and source of the algorithm. A description can also be
found in any recent edition of Numerical Recipes.
The routine used here diverges slightly from standard presentations in
order to be a bit more careful of tolerance.
References
----------
.. [Ridders1979]
Ridders, C. F. J. "A New Algorithm for Computing a
Single Root of a Real Continuous Function."
IEEE Trans. Circuits Systems 26, 979-980, 1979.
Examples
--------
>>> def f(x):
... return (x**2 - 1)
>>> from scipy import optimize
>>> root = optimize.ridder(f, 0, 2)
>>> root
1.0
>>> root = optimize.ridder(f, -2, 0)
>>> root
-1.0
"""
if not isinstance(args, tuple):
args = (args,)
if xtol <= 0:
raise ValueError("xtol too small (%g <= 0)" % xtol)
if rtol < _rtol:
raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
r = _zeros._ridder(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
return results_c(full_output, r)
def brentq(f, a, b, args=(),
xtol=_xtol, rtol=_rtol, maxiter=_iter,
full_output=False, disp=True):
"""
Find a root of a function in a bracketing interval using Brent's method.
Uses the classic Brent's method to find a zero of the function `f` on
the sign changing interval [a , b]. Generally considered the best of the
rootfinding routines here. It is a safe version of the secant method that
uses inverse quadratic extrapolation. Brent's method combines root
bracketing, interval bisection, and inverse quadratic interpolation. It is
sometimes known as the van Wijngaarden-Dekker-Brent method. Brent (1973)
claims convergence is guaranteed for functions computable within [a,b].
[Brent1973]_ provides the classic description of the algorithm. Another
description can be found in a recent edition of Numerical Recipes, including
[PressEtal1992]_. A third description is at
http://mathworld.wolfram.com/BrentsMethod.html. It should be easy to
understand the algorithm just by reading our code. Our code diverges a bit
from standard presentations: we choose a different formula for the
extrapolation step.
Parameters
----------
f : function
Python function returning a number. The function :math:`f`
must be continuous, and :math:`f(a)` and :math:`f(b)` must
have opposite signs.
a : scalar
One end of the bracketing interval :math:`[a, b]`.
b : scalar
The other end of the bracketing interval :math:`[a, b]`.
xtol : number, optional
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter must be nonnegative. For nice functions, Brent's
method will often satisfy the above condition with ``xtol/2``
and ``rtol/2``. [Brent1973]_
rtol : number, optional
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter cannot be smaller than its default value of
``4*np.finfo(float).eps``. For nice functions, Brent's
method will often satisfy the above condition with ``xtol/2``
and ``rtol/2``. [Brent1973]_
maxiter : int, optional
if convergence is not achieved in `maxiter` iterations, an error is
raised. Must be >= 0.
args : tuple, optional
containing extra arguments for the function `f`.
`f` is called by ``apply(f, (x)+args)``.
full_output : bool, optional
If `full_output` is False, the root is returned. If `full_output` is
True, the return value is ``(x, r)``, where `x` is the root, and `r` is
a `RootResults` object.
disp : bool, optional
If True, raise RuntimeError if the algorithm didn't converge.
Otherwise the convergence status is recorded in any `RootResults`
return object.
Returns
-------
x0 : float
Zero of `f` between `a` and `b`.
r : `RootResults` (present if ``full_output = True``)
Object containing information about the convergence. In particular,
``r.converged`` is True if the routine converged.
Notes
-----
`f` must be continuous. f(a) and f(b) must have opposite signs.
Related functions fall into several classes:
multivariate local optimizers
`fmin`, `fmin_powell`, `fmin_cg`, `fmin_bfgs`, `fmin_ncg`
nonlinear least squares minimizer
`leastsq`
constrained multivariate optimizers
`fmin_l_bfgs_b`, `fmin_tnc`, `fmin_cobyla`
global optimizers
`basinhopping`, `brute`, `differential_evolution`
local scalar minimizers
`fminbound`, `brent`, `golden`, `bracket`
n-dimensional root-finding
`fsolve`
one-dimensional root-finding
`brenth`, `ridder`, `bisect`, `newton`
scalar fixed-point finder
`fixed_point`
References
----------
.. [Brent1973]
Brent, R. P.,
*Algorithms for Minimization Without Derivatives*.
Englewood Cliffs, NJ: Prentice-Hall, 1973. Ch. 3-4.
.. [PressEtal1992]
Press, W. H.; Flannery, B. P.; Teukolsky, S. A.; and Vetterling, W. T.
*Numerical Recipes in FORTRAN: The Art of Scientific Computing*, 2nd ed.
Cambridge, England: Cambridge University Press, pp. 352-355, 1992.
Section 9.3: "Van Wijngaarden-Dekker-Brent Method."
Examples
--------
>>> def f(x):
... return (x**2 - 1)
>>> from scipy import optimize
>>> root = optimize.brentq(f, -2, 0)
>>> root
-1.0
>>> root = optimize.brentq(f, 0, 2)
>>> root
1.0
"""
if not isinstance(args, tuple):
args = (args,)
if xtol <= 0:
raise ValueError("xtol too small (%g <= 0)" % xtol)
if rtol < _rtol:
raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
r = _zeros._brentq(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
return results_c(full_output, r)
def brenth(f, a, b, args=(),
xtol=_xtol, rtol=_rtol, maxiter=_iter,
full_output=False, disp=True):
"""Find a root of a function in a bracketing interval using Brent's
method with hyperbolic extrapolation.
A variation on the classic Brent routine to find a zero of the function f
between the arguments a and b that uses hyperbolic extrapolation instead of
inverse quadratic extrapolation. There was a paper back in the 1980's ...
f(a) and f(b) cannot have the same signs. Generally on a par with the
brent routine, but not as heavily tested. It is a safe version of the
secant method that uses hyperbolic extrapolation. The version here is by
Chuck Harris.
Parameters
----------
f : function
Python function returning a number. f must be continuous, and f(a) and
f(b) must have opposite signs.
a : scalar
One end of the bracketing interval [a,b].
b : scalar
The other end of the bracketing interval [a,b].
xtol : number, optional
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter must be nonnegative. As with `brentq`, for nice
functions the method will often satisfy the above condition
with ``xtol/2`` and ``rtol/2``.
rtol : number, optional
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter cannot be smaller than its default value of
``4*np.finfo(float).eps``. As with `brentq`, for nice functions
the method will often satisfy the above condition with
``xtol/2`` and ``rtol/2``.
maxiter : int, optional
if convergence is not achieved in `maxiter` iterations, an error is
raised. Must be >= 0.
args : tuple, optional
containing extra arguments for the function `f`.
`f` is called by ``apply(f, (x)+args)``.
full_output : bool, optional
If `full_output` is False, the root is returned. If `full_output` is
True, the return value is ``(x, r)``, where `x` is the root, and `r` is
a `RootResults` object.
disp : bool, optional
If True, raise RuntimeError if the algorithm didn't converge.
Otherwise the convergence status is recorded in any `RootResults`
return object.
Returns
-------
x0 : float
Zero of `f` between `a` and `b`.
r : `RootResults` (present if ``full_output = True``)
Object containing information about the convergence. In particular,
``r.converged`` is True if the routine converged.
Examples
--------
>>> def f(x):
... return (x**2 - 1)
>>> from scipy import optimize
>>> root = optimize.brenth(f, -2, 0)
>>> root
-1.0
>>> root = optimize.brenth(f, 0, 2)
>>> root
1.0
See Also
--------
fmin, fmin_powell, fmin_cg,
fmin_bfgs, fmin_ncg : multivariate local optimizers
leastsq : nonlinear least squares minimizer
fmin_l_bfgs_b, fmin_tnc, fmin_cobyla : constrained multivariate optimizers
basinhopping, differential_evolution, brute : global optimizers
fminbound, brent, golden, bracket : local scalar minimizers
fsolve : n-dimensional root-finding
brentq, brenth, ridder, bisect, newton : one-dimensional root-finding
fixed_point : scalar fixed-point finder
"""
if not isinstance(args, tuple):
args = (args,)
if xtol <= 0:
raise ValueError("xtol too small (%g <= 0)" % xtol)
if rtol < _rtol:
raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
r = _zeros._brenth(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
return results_c(full_output, r)
################################
# TOMS "Algorithm 748: Enclosing Zeros of Continuous Functions", by
# Alefeld, G. E. and Potra, F. A. and Shi, Yixun,
# See [1]
def _within_tolerance(x, y, rtol, atol):
diff = np.abs(x - y)
z = np.abs(y)
result = (diff <= (atol + rtol * z))
return result
def _notclose(fs, rtol=_rtol, atol=_xtol):
# Ensure not None, not 0, all finite, and not very close to each other
notclosefvals = (
all(fs) and all(np.isfinite(fs)) and
not any(any(np.isclose(_f, fs[i + 1:], rtol=rtol, atol=atol))
for i, _f in enumerate(fs[:-1])))
return notclosefvals
def _secant(xvals, fvals):
"""Perform a secant step, taking a little care"""
# Secant has many "mathematically" equivalent formulations
# x2 = x0 - (x1 - x0)/(f1 - f0) * f0
# = x1 - (x1 - x0)/(f1 - f0) * f1
# = (-x1 * f0 + x0 * f1) / (f1 - f0)
# = (-f0 / f1 * x1 + x0) / (1 - f0 / f1)
# = (-f1 / f0 * x0 + x1) / (1 - f1 / f0)
x0, x1 = xvals[:2]
f0, f1 = fvals[:2]
if f0 == f1:
return np.nan
if np.abs(f1) > np.abs(f0):
x2 = (-f0 / f1 * x1 + x0) / (1 - f0 / f1)
else:
x2 = (-f1 / f0 * x0 + x1) / (1 - f1 / f0)
return x2
def _update_bracket(ab, fab, c, fc):
"""Update a bracket given (c, fc), return the discarded endpoints."""
fa, fb = fab
idx = (0 if np.sign(fa) * np.sign(fc) > 0 else 1)
rx, rfx = ab[idx], fab[idx]
fab[idx] = fc
ab[idx] = c
return rx, rfx
def _compute_divided_differences(xvals, fvals, N=None, full=True,
forward=True):
"""Return a matrix of divided differences for the xvals, fvals pairs
DD[i, j] = f[x_{i-j}, ..., x_i] for 0 <= j <= i
If full is False, just return the main diagonal(or last row):
f[a], f[a, b] and f[a, b, c].
If forward is False, return f[c], f[b, c], f[a, b, c]."""
if full:
if forward:
xvals = np.asarray(xvals)
else:
xvals = np.array(xvals)[::-1]
M = len(xvals)
N = M if N is None else min(N, M)
DD = np.zeros([M, N])
DD[:, 0] = fvals[:]
for i in range(1, N):
DD[i:, i] = (np.diff(DD[i - 1:, i - 1]) /
(xvals[i:] - xvals[:M - i]))
return DD
xvals = np.asarray(xvals)
dd = np.array(fvals)
row = np.array(fvals)
idx2Use = (0 if forward else -1)
dd[0] = fvals[idx2Use]
for i in range(1, len(xvals)):
denom = xvals[i:i + len(row) - 1] - xvals[:len(row) - 1]
row = np.diff(row)[:] / denom
dd[i] = row[idx2Use]
return dd
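# A small worked check (hypothetical values): for f(x) = x**2 sampled at
# x = [0, 1, 2], the divided differences are f[0] = 0, f[0, 1] = 1 and
# f[0, 1, 2] = 1, so
#     _compute_divided_differences([0, 1, 2], [0, 1, 4], full=False)
# returns an array equal to [0, 1, 1].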
def _interpolated_poly(xvals, fvals, x):
"""Compute p(x) for the polynomial passing through the specified locations.
Use Neville's algorithm to compute p(x) where p is the minimal degree
polynomial passing through the points xvals, fvals"""
xvals = np.asarray(xvals)
N = len(xvals)
Q = np.zeros([N, N])
D = np.zeros([N, N])
Q[:, 0] = fvals[:]
D[:, 0] = fvals[:]
for k in range(1, N):
alpha = D[k:, k - 1] - Q[k - 1:N - 1, k - 1]
diffik = xvals[0:N - k] - xvals[k:N]
Q[k:, k] = (xvals[k:] - x) / diffik * alpha
D[k:, k] = (xvals[:N - k] - x) / diffik * alpha
# Expect Q[-1, 1:] to be small relative to Q[-1, 0] as x approaches a root
return np.sum(Q[-1, 1:]) + Q[-1, 0]
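# A small sanity check (hypothetical values): the minimal-degree polynomial
# through (0, 0), (1, 1) and (2, 4) is x**2, so
#     _interpolated_poly([0, 1, 2], [0, 1, 4], 3)
# evaluates to 9.0.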
def _inverse_poly_zero(a, b, c, d, fa, fb, fc, fd):
"""Inverse cubic interpolation f-values -> x-values
Given four points (fa, a), (fb, b), (fc, c), (fd, d) with
fa, fb, fc, fd all distinct, find poly IP(y) through the 4 points
and compute x=IP(0).
"""
return _interpolated_poly([fa, fb, fc, fd], [a, b, c, d], 0)
def _newton_quadratic(ab, fab, d, fd, k):
"""Apply Newton-Raphson like steps, using divided differences to approximate f'
ab is a real interval [a, b] containing a root,
fab holds the real values of f(a), f(b)
d is a real number outside [ab, b]
k is the number of steps to apply
"""
a, b = ab
fa, fb = fab
_, B, A = _compute_divided_differences([a, b, d], [fa, fb, fd],
forward=True, full=False)
# _P is the quadratic polynomial through the 3 points
def _P(x):
# Horner evaluation of fa + B * (x - a) + A * (x - a) * (x - b)
return (A * (x - b) + B) * (x - a) + fa
if A == 0:
r = a - fa / B
else:
r = (a if np.sign(A) * np.sign(fa) > 0 else b)
# Apply k Newton-Raphson steps to _P(x), starting from x=r
for i in range(k):
r1 = r - _P(r) / (B + A * (2 * r - a - b))
if not (ab[0] < r1 < ab[1]):
if (ab[0] < r < ab[1]):
return r
r = sum(ab) / 2.0
break
r = r1
return r
class TOMS748Solver(object):
"""Solve f(x, *args) == 0 using Algorithm748 of Alefeld, Potro & Shi.
"""
_MU = 0.5
_K_MIN = 1
_K_MAX = 100 # A very high value for real usage. Expect 1, 2, maybe 3.
def __init__(self):
self.f = None
self.args = None
self.function_calls = 0
self.iterations = 0
self.k = 2
# ab=[a,b] is a global interval containing a root
self.ab = [np.nan, np.nan]
# fab is function values at a, b
self.fab = [np.nan, np.nan]
self.d = None
self.fd = None
self.e = None
self.fe = None
self.disp = False
self.xtol = _xtol
self.rtol = _rtol
self.maxiter = _iter
def configure(self, xtol, rtol, maxiter, disp, k):
self.disp = disp
self.xtol = xtol
self.rtol = rtol
self.maxiter = maxiter
# Silently replace a low value of k with 1
self.k = max(k, self._K_MIN)
# Noisily replace a high value of k with self._K_MAX
if self.k > self._K_MAX:
msg = "toms748: Overriding k: ->%d" % self._K_MAX
warnings.warn(msg, RuntimeWarning)
self.k = self._K_MAX
def _callf(self, x, error=True):
"""Call the user-supplied function, update book-keeping"""
fx = self.f(x, *self.args)
self.function_calls += 1
if not np.isfinite(fx) and error:
raise ValueError("Invalid function value: f(%f) -> %s " % (x, fx))
return fx
def get_result(self, x, flag=_ECONVERGED):
r"""Package the result and statistics into a tuple."""
return (x, self.function_calls, self.iterations, flag)
def _update_bracket(self, c, fc):
return _update_bracket(self.ab, self.fab, c, fc)
def start(self, f, a, b, args=()):
r"""Prepare for the iterations."""
self.function_calls = 0
self.iterations = 0
self.f = f
self.args = args
self.ab[:] = [a, b]
if not np.isfinite(a) or np.imag(a) != 0:
raise ValueError("Invalid x value: %s " % (a))
if not np.isfinite(b) or np.imag(b) != 0:
raise ValueError("Invalid x value: %s " % (b))
fa = self._callf(a)
if not np.isfinite(fa) or np.imag(fa) != 0:
raise ValueError("Invalid function value: f(%f) -> %s " % (a, fa))
if fa == 0:
return _ECONVERGED, a
fb = self._callf(b)
if not np.isfinite(fb) or np.imag(fb) != 0:
raise ValueError("Invalid function value: f(%f) -> %s " % (b, fb))
if fb == 0:
return _ECONVERGED, b
if np.sign(fb) * np.sign(fa) > 0:
raise ValueError("a, b must bracket a root f(%e)=%e, f(%e)=%e " %
(a, fa, b, fb))
self.fab[:] = [fa, fb]
return _EINPROGRESS, sum(self.ab) / 2.0
def get_status(self):
"""Determine the current status."""
a, b = self.ab[:2]
if _within_tolerance(a, b, self.rtol, self.xtol):
return _ECONVERGED, sum(self.ab) / 2.0
if self.iterations >= self.maxiter:
return _ECONVERR, sum(self.ab) / 2.0
return _EINPROGRESS, sum(self.ab) / 2.0
def iterate(self):
"""Perform one step in the algorithm.
Implements Algorithm 4.1(k=1) or 4.2(k=2) in [APS1995]
"""
self.iterations += 1
eps = np.finfo(float).eps
d, fd, e, fe = self.d, self.fd, self.e, self.fe
ab_width = self.ab[1] - self.ab[0] # Need the start width below
c = None
for nsteps in range(2, self.k+2):
# If the f-values are sufficiently separated, perform an inverse
# polynomial interpolation step. Otherwise nsteps repeats of
# an approximate Newton-Raphson step.
if _notclose(self.fab + [fd, fe], rtol=0, atol=32*eps):
c0 = _inverse_poly_zero(self.ab[0], self.ab[1], d, e,
self.fab[0], self.fab[1], fd, fe)
if self.ab[0] < c0 < self.ab[1]:
c = c0
if c is None:
c = _newton_quadratic(self.ab, self.fab, d, fd, nsteps)
fc = self._callf(c)
if fc == 0:
return _ECONVERGED, c
# re-bracket
e, fe = d, fd
d, fd = self._update_bracket(c, fc)
# u is the endpoint with the smallest f-value
uix = (0 if np.abs(self.fab[0]) < np.abs(self.fab[1]) else 1)
u, fu = self.ab[uix], self.fab[uix]
_, A = _compute_divided_differences(self.ab, self.fab,
forward=(uix == 0), full=False)
c = u - 2 * fu / A
if np.abs(c - u) > 0.5 * (self.ab[1] - self.ab[0]):
c = sum(self.ab) / 2.0
else:
if np.isclose(c, u, rtol=eps, atol=0):
# c didn't change (much).
# Either because the f-values at the endpoints have vastly
# differing magnitudes, or because the root is very close to
# that endpoint
frs = np.frexp(self.fab)[1]
if frs[uix] < frs[1 - uix] - 50: # Differ by more than 2**50
c = (31 * self.ab[uix] + self.ab[1 - uix]) / 32
else:
# Make a bigger adjustment, about the
# size of the requested tolerance.
mm = (1 if uix == 0 else -1)
adj = mm * np.abs(c) * self.rtol + mm * self.xtol
c = u + adj
if not self.ab[0] < c < self.ab[1]:
c = sum(self.ab) / 2.0
fc = self._callf(c)
if fc == 0:
return _ECONVERGED, c
e, fe = d, fd
d, fd = self._update_bracket(c, fc)
# If the width of the new interval did not decrease enough, bisect
if self.ab[1] - self.ab[0] > self._MU * ab_width:
e, fe = d, fd
z = sum(self.ab) / 2.0
fz = self._callf(z)
if fz == 0:
return _ECONVERGED, z
d, fd = self._update_bracket(z, fz)
# Record d and e for next iteration
self.d, self.fd = d, fd
self.e, self.fe = e, fe
status, xn = self.get_status()
return status, xn
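# In outline, each iterate() call above performs (a paraphrase of the code,
# not upstream documentation): k rounds of either inverse cubic interpolation
# (when the four stored f-values are well separated) or a "Newton-quadratic"
# step, re-bracketing after every trial point; then a double-length secant
# step from the endpoint with the smaller |f|, replaced by the midpoint if the
# step exceeds half the bracket width; and finally a bisection safeguard
# whenever the bracket failed to shrink by the factor _MU.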
def solve(self, f, a, b, args=(),
xtol=_xtol, rtol=_rtol, k=2, maxiter=_iter, disp=True):
r"""Solve f(x) = 0 given an interval containing a zero."""
self.configure(xtol=xtol, rtol=rtol, maxiter=maxiter, disp=disp, k=k)
status, xn = self.start(f, a, b, args)
if status == _ECONVERGED:
return self.get_result(xn)
# The first step only has two x-values.
c = _secant(self.ab, self.fab)
if not self.ab[0] < c < self.ab[1]:
c = sum(self.ab) / 2.0
fc = self._callf(c)
if fc == 0:
return self.get_result(c)
self.d, self.fd = self._update_bracket(c, fc)
self.e, self.fe = None, None
self.iterations += 1
while True:
status, xn = self.iterate()
if status == _ECONVERGED:
return self.get_result(xn)
if status == _ECONVERR:
fmt = "Failed to converge after %d iterations, bracket is %s"
if disp:
msg = fmt % (self.iterations + 1, self.ab)
raise RuntimeError(msg)
return self.get_result(xn, _ECONVERR)
def toms748(f, a, b, args=(), k=1,
xtol=_xtol, rtol=_rtol, maxiter=_iter,
full_output=False, disp=True):
"""
Find a zero using TOMS Algorithm 748 method.
Implements the Algorithm 748 method of Alefeld, Potra and Shi to find a
zero of the function `f` on the interval `[a , b]`, where `f(a)` and
`f(b)` must have opposite signs.
It uses a mixture of inverse cubic interpolation and
"Newton-quadratic" steps. [APS1995].
Parameters
----------
f : function
Python function returning a scalar. The function :math:`f`
must be continuous, and :math:`f(a)` and :math:`f(b)`
have opposite signs.
a : scalar,
lower boundary of the search interval
b : scalar,
upper boundary of the search interval
args : tuple, optional
containing extra arguments for the function `f`.
`f` is called by ``f(x, *args)``.
k : int, optional
The number of Newton quadratic steps to perform each
iteration. ``k>=1``.
xtol : scalar, optional
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter must be positive.
rtol : scalar, optional
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root.
maxiter : int, optional
if convergence is not achieved in `maxiter` iterations, an error is
raised. Must be >= 0.
full_output : bool, optional
If `full_output` is False, the root is returned. If `full_output` is
True, the return value is ``(x, r)``, where `x` is the root, and `r` is
a `RootResults` object.
disp : bool, optional
If True, raise RuntimeError if the algorithm didn't converge.
Otherwise the convergence status is recorded in the `RootResults`
return object.
Returns
-------
x0 : float
Approximate Zero of `f`
r : `RootResults` (present if ``full_output = True``)
Object containing information about the convergence. In particular,
``r.converged`` is True if the routine converged.
See Also
--------
brentq, brenth, ridder, bisect, newton
fsolve : find zeroes in n dimensions.
Notes
-----
`f` must be continuous.
Algorithm 748 with ``k=2`` is asymptotically the most efficient
algorithm known for finding roots of a four times continuously
differentiable function.
In contrast with Brent's algorithm, which may only decrease the length of
the enclosing bracket on the last step, Algorithm 748 decreases it each
iteration with the same asymptotic efficiency as it finds the root.
For easy statement of efficiency indices, assume that `f` has 4
continuous derivatives.
For ``k=1``, the convergence order is at least 2.7, and with about
asymptotically 2 function evaluations per iteration, the efficiency
index is approximately 1.65.
For ``k=2``, the order is about 4.6 with asymptotically 3 function
evaluations per iteration, and the efficiency index 1.66.
For higher values of `k`, the efficiency index approaches
the `k`-th root of ``(3k-2)``, hence ``k=1`` or ``k=2`` are
usually appropriate.
References
----------
.. [APS1995]
Alefeld, G. E. and Potra, F. A. and Shi, Yixun,
*Algorithm 748: Enclosing Zeros of Continuous Functions*,
ACM Trans. Math. Softw. Volume 21, Number 3, September 1995, 327-344,
doi:10.1145/210089.210111
Examples
--------
>>> def f(x):
... return (x**3 - 1) # only one real root at x = 1
>>> from scipy import optimize
>>> root, results = optimize.toms748(f, 0, 2, full_output=True)
>>> root
1.0
>>> results
converged: True
flag: 'converged'
function_calls: 11
iterations: 5
root: 1.0
"""
if xtol <= 0:
raise ValueError("xtol too small (%g <= 0)" % xtol)
if rtol < _rtol / 4:
raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
if maxiter < 1:
raise ValueError("maxiter must be greater than 0")
if not np.isfinite(a):
raise ValueError("a is not finite %s" % a)
if not np.isfinite(b):
raise ValueError("b is not finite %s" % b)
if a >= b:
raise ValueError("a and b are not an interval [%d, %d]" % (a, b))
if not k >= 1:
raise ValueError("k too small (%s < 1)" % k)
if not isinstance(args, tuple):
args = (args,)
solver = TOMS748Solver()
result = solver.solve(f, a, b, args=args, k=k, xtol=xtol, rtol=rtol,
maxiter=maxiter, disp=disp)
x, function_calls, iterations, flag = result
return _results_select(full_output, (x, function_calls, iterations, flag))
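# A minimal, guarded usage sketch of the toms748 wrapper above (illustrative
# only, not taken from the upstream module); it relies solely on names defined
# in this file and runs only when the file is executed directly.
if __name__ == "__main__":
    def _demo_f(x):
        return x**3 - 1  # single real root at x = 1, bracketed by [0, 2]
    demo_root = toms748(_demo_f, 0, 2, k=2)
    print("root = %s" % demo_root)  # approximately 1.0
    # full_output=True additionally returns a RootResults object.
    demo_root, demo_res = toms748(_demo_f, 0, 2, full_output=True)
    print("function_calls = %d, iterations = %d"
          % (demo_res.function_calls, demo_res.iterations))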
| bsd-3-clause |
chibill/CMCP | runtime/lib/tqdm/_tqdm.py | 2 | 20945 | """
Customisable progressbar decorator for iterators.
Includes a default (x)range iterator printing to stderr.
Usage:
>>> from tqdm import trange[, tqdm]
>>> for i in trange(10): #same as: for i in tqdm(xrange(10))
... ...
"""
# future division is important to divide integers and get as
# a result precise floating numbers (instead of truncated int)
from __future__ import division, absolute_import
# import compatibility functions and utilities
from ._utils import _supports_unicode, _environ_cols_wrapper, _range, _unich, \
_term_move_up
import sys
from time import time
__author__ = {"github.com/": ["noamraph", "obiwanus", "kmike", "hadim",
"casperdcl", "lrq3000"]}
__all__ = ['tqdm', 'trange', 'format_interval', 'format_meter']
def format_sizeof(num, suffix=''):
"""
Formats a number (greater than unity) with SI Order of Magnitude prefixes.
Parameters
----------
num : float
Number ( >= 1) to format.
suffix : str, optional
Post-postfix [default: ''].
Returns
-------
out : str
Number with Order of Magnitude SI unit postfix.
"""
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 999.95:
if abs(num) < 99.95:
if abs(num) < 9.995:
return '{0:1.2f}'.format(num) + unit + suffix
return '{0:2.1f}'.format(num) + unit + suffix
return '{0:3.0f}'.format(num) + unit + suffix
num /= 1000.0
return '{0:3.1f}Y'.format(num) + suffix
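# Worked examples for format_sizeof above (illustrative; the values follow
# directly from the implementation):
# >>> format_sizeof(999)
# '999'
# >>> format_sizeof(12345)
# '12.3K'
# >>> format_sizeof(1.2e9)
# '1.20G'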
def format_interval(t):
"""
Formats a number of seconds as a clock time, [H:]MM:SS
Parameters
----------
t : int
Number of seconds.
Returns
-------
out : str
[H:]MM:SS
"""
mins, s = divmod(int(t), 60)
h, m = divmod(mins, 60)
if h:
return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)
else:
return '{0:02d}:{1:02d}'.format(m, s)
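# Worked examples for format_interval above (illustrative; the values follow
# directly from the implementation):
# >>> format_interval(75)
# '01:15'
# >>> format_interval(3725)
# '1:02:05'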
def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False,
unit='it', unit_scale=False, rate=None):
"""
Return a string-based progress bar given some parameters
Parameters
----------
n : int
Number of finished iterations.
total : int
The expected total number of iterations. If meaningless (e.g. None), only
basic progress statistics are displayed (no ETA).
elapsed : float
Number of seconds passed since start.
ncols : int, optional
The width of the entire output message. If specified, dynamically
resizes the progress meter to stay within this bound
[default: None]. The fallback meter width is 10 for the progress bar
+ no limit for the iterations counter and statistics. If 0, will not
print any meter (only stats).
prefix : str, optional
Prefix message (included in total width) [default: ''].
ascii : bool, optional
If not set, use unicode (smooth blocks) to fill the meter
[default: False]. The fallback is to use ASCII characters (1-9 #).
unit : str, optional
The iteration unit [default: 'it'].
unit_scale : bool, optional
If set, the number of iterations will be printed with an appropriate
SI metric prefix (K = 10^3, M = 10^6, etc.) [default: False].
rate : float, optional
Manual override for iteration rate.
If [default: None], uses n/elapsed.
Returns
-------
out : Formatted meter and stats, ready to display.
"""
# sanity check: total
if total and n > total:
total = None
elapsed_str = format_interval(elapsed)
# if unspecified, attempt to use rate = average speed
# (we allow manual override since predicting time is an arcane art)
if rate is None and elapsed:
rate = n / elapsed
rate_fmt = ((format_sizeof(rate) if unit_scale else
'{0:5.2f}'.format(rate)) if elapsed else
'?') \
+ unit + '/s'
if unit_scale:
n_fmt = format_sizeof(n)
total_fmt = format_sizeof(total) if total else None
else:
n_fmt = str(n)
total_fmt = str(total)
# total is known: we can predict some stats
if total:
# fractional and percentage progress
frac = n / total
percentage = frac * 100
remaining_str = format_interval((total - n) / rate) if rate else '?'
# format the stats displayed to the left and right sides of the bar
l_bar = (prefix if prefix else '') + '{0:3.0f}%|'.format(percentage)
r_bar = '| {0}/{1} [{2}<{3}, {4}]'.format(
n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt)
if ncols == 0:
return l_bar[:-1] + r_bar[1:]
# space available for bar's display
N_BARS = max(1, ncols - len(l_bar) - len(r_bar)) if ncols \
else 10
# format bar depending on availability of unicode/ascii chars
if ascii:
bar_length, frac_bar_length = divmod(
int(frac * N_BARS * 10), 10)
bar = '#' * bar_length
frac_bar = chr(48 + frac_bar_length) if frac_bar_length \
else ' '
else:
bar_length, frac_bar_length = divmod(int(frac * N_BARS * 8), 8)
bar = _unich(0x2588) * bar_length
frac_bar = _unich(0x2590 - frac_bar_length) \
if frac_bar_length else ' '
# whitespace padding
if bar_length < N_BARS:
full_bar = bar + frac_bar + \
' ' * max(N_BARS - bar_length - 1, 0)
else:
full_bar = bar + \
' ' * max(N_BARS - bar_length, 0)
return l_bar + full_bar + r_bar
# no total: no progressbar, ETA, just progress stats
else:
return (prefix if prefix else '') + '{0}{1} [{2}, {3}]'.format(
n_fmt, unit, elapsed_str, rate_fmt)
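# Illustrative note on format_meter above (not upstream documentation): with
# ncols=0 the bar itself is suppressed and only the statistics remain, e.g.
# format_meter(20, 100, 12.0, ncols=0) yields roughly
# ' 20% 20/100 [00:12<00:48,  1.67it/s]', while total=None drops the
# percentage and ETA, leaving only the counter, elapsed time and rate.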
def StatusPrinter(file):
"""
Manage the printing and in-place updating of a line of characters.
Note that if the string is longer than a line, then in-place updating
may not work (it will print a new line at each refresh).
"""
fp = file
if not getattr(fp, 'flush', False): # pragma: no cover
fp.flush = lambda: None
last_printed_len = [0] # closure over mutable variable (fast)
def print_status(s):
len_s = len(s)
fp.write('\r' + s + (' ' * max(last_printed_len[0] - len_s, 0)))
fp.flush()
last_printed_len[0] = len_s
return print_status
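# Illustrative sketch of StatusPrinter above (not upstream documentation): it
# returns a closure that rewrites a single console line in place, e.g.
# >>> import sys
# >>> print_status = StatusPrinter(sys.stdout)
# >>> print_status('10/100')  # writes '\r10/100'
# >>> print_status('9/100')   # overwrites, padding out the longer old text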
class tqdm(object):
"""
Decorate an iterable object, returning an iterator which acts exactly
like the original iterable, but prints a dynamically updating
progressbar every time a value is requested.
"""
def __init__(self, iterable=None, desc=None, total=None, leave=False,
file=sys.stderr, ncols=None, mininterval=0.1,
maxinterval=10.0, miniters=None, ascii=None, disable=False,
unit='it', unit_scale=False, dynamic_ncols=False,
smoothing=0.3, nested=False, gui=False):
"""
Parameters
----------
iterable : iterable, optional
Iterable to decorate with a progressbar.
Leave blank [default: None] to manually manage the updates.
desc : str, optional
Prefix for the progressbar [default: None].
total : int, optional
The number of expected iterations. If not given, len(iterable)
is used if possible. As a last resort, only basic progress
statistics are displayed (no ETA, no progressbar). If `gui` is
True and this parameter needs subsequent updating, specify an
initial arbitrary large positive integer, e.g. int(9e9).
leave : bool, optional
If [default: False], removes all traces of the progressbar
upon termination of iteration.
file : `io.TextIOWrapper` or `io.StringIO`, optional
Specifies where to output the progress messages
[default: sys.stderr]. Uses `file.write(str)` and `file.flush()`
methods.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progressbar to stay within this bound.
If [default: None], attempts to use environment width. The
fallback is a meter width of 10 and no limit for the counter and
statistics. If 0, will not print any meter (only stats).
mininterval : float, optional
Minimum progress update interval, in seconds [default: 0.1].
maxinterval : float, optional
Maximum progress update interval, in seconds [default: 10.0].
miniters : int, optional
Minimum progress update interval, in iterations [default: None].
ascii : bool, optional
If [default: None] or false, use unicode (smooth blocks) to fill
the meter. The fallback is to use ASCII characters `1-9 #`.
disable : bool
Whether to disable the entire progressbar wrapper
[default: False].
unit : str, optional
String that will be used to define the unit of each iteration
[default: 'it'].
unit_scale : bool, optional
If set, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.) [default: False].
dynamic_ncols : bool, optional
If set, constantly alters `ncols` to the environment (allowing
for window resizes) [default: False].
smoothing : float
Exponential moving average smoothing factor for speed estimates
(ignored in GUI mode). Ranges from 0 (average speed) to 1
(current/instantaneous speed) [default: 0.3].
nested : bool, optional
Whether this iterable is nested in another one also managed by
`tqdm` [default: False]. Allows display of multiple, nested
progress bars.
gui : bool, optional
WARNING: internal parameter - do not use.
Use tqdm_gui(...) instead. If set, will attempt to use
matplotlib animations for a graphical output [default: False].
Returns
-------
out : decorated iterator.
"""
# Preprocess the arguments
if total is None and iterable is not None:
try:
total = len(iterable)
except (TypeError, AttributeError):
total = None
if ((ncols is None) and (file in (sys.stderr, sys.stdout))) or \
dynamic_ncols:
if dynamic_ncols: # pragma: no cover
dynamic_ncols = _environ_cols_wrapper()
ncols = dynamic_ncols(file)
else:
ncols = _environ_cols_wrapper()(file)
if miniters is None:
miniters = 0
dynamic_miniters = True
else:
dynamic_miniters = False
if mininterval is None:
mininterval = 0
if maxinterval is None:
maxinterval = 0
if ascii is None:
ascii = not _supports_unicode(file)
if smoothing is None:
smoothing = 0
# Store the arguments
self.iterable = iterable
self.desc = desc + ': ' if desc else ''
self.total = total
self.leave = leave
self.fp = file
self.ncols = ncols
self.mininterval = mininterval
self.maxinterval = maxinterval
self.miniters = miniters
self.dynamic_miniters = dynamic_miniters
self.ascii = ascii
self.disable = disable
self.unit = unit
self.unit_scale = unit_scale
self.gui = gui
self.dynamic_ncols = dynamic_ncols
self.smoothing = smoothing
self.avg_rate = None
# if nested, at initial sp() call we replace '\r' by '\n' to
# not overwrite the outer progress bar
self.nested = nested
if not gui:
# Initialize the screen printer
self.sp = StatusPrinter(self.fp)
if not disable:
if self.nested:
self.fp.write('\n')
self.sp(format_meter(0, total, 0,
(dynamic_ncols(file) if dynamic_ncols else ncols),
self.desc, ascii, unit, unit_scale))
# Init the time/iterations counters
self.start_t = self.last_print_t = time()
self.last_print_n = 0
self.n = 0
def __len__(self):
return len(self.iterable) if self.iterable else self.total
def __enter__(self):
return self
def __exit__(self, *exc):
self.close()
return False
def __iter__(self):
''' Backward-compatibility to use: for x in tqdm(iterable) '''
# Inlining instance variables as locals (speed optimisation)
iterable = self.iterable
# If the bar is disabled, then just walk the iterable
# (note: keep this check outside the loop for performance)
if self.disable:
for obj in iterable:
yield obj
else:
ncols = self.ncols
mininterval = self.mininterval
maxinterval = self.maxinterval
miniters = self.miniters
dynamic_miniters = self.dynamic_miniters
unit = self.unit
unit_scale = self.unit_scale
ascii = self.ascii
start_t = self.start_t
last_print_t = self.last_print_t
last_print_n = self.last_print_n
n = self.n
dynamic_ncols = self.dynamic_ncols
smoothing = self.smoothing
avg_rate = self.avg_rate
try:
sp = self.sp
except AttributeError:
raise DeprecationWarning('Please use tqdm_gui(...)'
' instead of tqdm(..., gui=True)')
for obj in iterable:
yield obj
# Update and print the progressbar.
# Note: does not call self.update(1) for speed optimisation.
n += 1
delta_it = n - last_print_n
# check the counter first (avoid calls to time())
if delta_it >= miniters:
cur_t = time()
delta_t = cur_t - last_print_t
if delta_t >= mininterval:
elapsed = cur_t - start_t
# EMA (not just overall average)
if smoothing and delta_t:
avg_rate = delta_it / delta_t \
if avg_rate is None \
else smoothing * delta_it / delta_t + \
(1 - smoothing) * avg_rate
sp(format_meter(
n, self.total, elapsed,
(dynamic_ncols(self.fp) if dynamic_ncols
else ncols),
self.desc, ascii, unit, unit_scale, avg_rate))
# If no `miniters` was specified, adjust automatically
# to the maximum iteration rate seen so far.
if dynamic_miniters:
if maxinterval and delta_t > maxinterval:
# Set miniters to correspond to maxinterval
miniters = delta_it * maxinterval / delta_t
elif mininterval and delta_t:
# EMA-weight miniters to converge
# towards the timeframe of mininterval
miniters = smoothing * delta_it * mininterval \
/ delta_t + (1 - smoothing) * miniters
else:
miniters = smoothing * delta_it + \
(1 - smoothing) * miniters
# Store old values for next call
last_print_n = n
last_print_t = cur_t
# Closing the progress bar.
# Update some internal variables for close().
self.last_print_n = last_print_n
self.n = n
self.close()
def update(self, n=1):
"""
Manually update the progress bar, useful for streams
such as reading files.
E.g.:
>>> t = tqdm(total=filesize) # Initialise
>>> for current_buffer in stream:
... ...
... t.update(len(current_buffer))
>>> t.close()
The last line is highly recommended, but possibly not necessary if
`t.update()` will be called in such a way that `filesize` will be
exactly reached and printed.
Parameters
----------
n : int
Increment to add to the internal counter of iterations
[default: 1].
"""
if self.disable:
return
if n < 1:
n = 1
self.n += n
delta_it = self.n - self.last_print_n # should be n?
if delta_it >= self.miniters:
# We check the counter first, to reduce the overhead of time()
cur_t = time()
delta_t = cur_t - self.last_print_t
if delta_t >= self.mininterval:
elapsed = cur_t - self.start_t
# EMA (not just overall average)
if self.smoothing and delta_t:
self.avg_rate = delta_it / delta_t \
if self.avg_rate is None \
else self.smoothing * delta_it / delta_t + \
(1 - self.smoothing) * self.avg_rate
if not hasattr(self, "sp"):
raise DeprecationWarning('Please use tqdm_gui(...)'
' instead of tqdm(..., gui=True)')
self.sp(format_meter(
self.n, self.total, elapsed,
(self.dynamic_ncols(self.fp) if self.dynamic_ncols
else self.ncols),
self.desc, self.ascii, self.unit, self.unit_scale,
self.avg_rate))
# If no `miniters` was specified, adjust automatically to the
# maximum iteration rate seen so far.
# e.g.: After running `tqdm.update(5)`, subsequent
# calls to `tqdm.update()` will only cause an update after
# at least 5 more iterations.
if self.dynamic_miniters:
if self.maxinterval and delta_t > self.maxinterval:
self.miniters = self.miniters * self.maxinterval \
/ delta_t
elif self.mininterval and delta_t:
self.miniters = self.smoothing * delta_it \
* self.mininterval / delta_t + \
(1 - self.smoothing) * self.miniters
else:
self.miniters = self.smoothing * delta_it + \
(1 - self.smoothing) * self.miniters
# Store old values for next call
self.last_print_n = self.n
self.last_print_t = cur_t
def close(self):
"""
Cleanup and (if leave=False) close the progressbar.
"""
if self.disable:
return
endchar = '\r'
if self.nested:
endchar += _term_move_up()
if self.leave:
if self.last_print_n < self.n:
cur_t = time()
# stats for overall rate (no weighted average)
self.sp(format_meter(
self.n, self.total, cur_t - self.start_t,
(self.dynamic_ncols(self.fp) if self.dynamic_ncols
else self.ncols),
self.desc, self.ascii, self.unit, self.unit_scale))
if self.nested:
self.fp.write(endchar)
else:
self.fp.write('\n')
else:
self.sp('')
self.fp.write(endchar)
def set_description(self, desc=None):
"""
Set/modify description of the progress bar.
"""
self.desc = desc + ': ' if desc else ''
def trange(*args, **kwargs):
"""
A shortcut for tqdm(xrange(*args), **kwargs).
On Python3+ range is used instead of xrange.
"""
return tqdm(_range(*args), **kwargs)
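# A minimal, guarded usage sketch of tqdm/trange above (illustrative only, not
# taken from the upstream module); it uses only parameters documented in this
# file and runs only when the file is executed directly.
if __name__ == "__main__":
    # Iterable mode: wrap a range; the bar updates as values are consumed.
    for _ in trange(100, desc='demo'):
        pass
    # Manual mode: useful for streams whose chunk sizes vary.
    pbar = tqdm(total=300, unit='B', unit_scale=True, leave=True)
    for chunk in (100, 120, 80):
        pbar.update(chunk)
    pbar.close()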
| gpl-3.0 |
dsm054/pandas | pandas/tests/io/test_excel.py | 1 | 99752 | # pylint: disable=E1101
import os
import warnings
from datetime import datetime, date, time, timedelta
from distutils.version import LooseVersion
from functools import partial
from warnings import catch_warnings
from collections import OrderedDict
import numpy as np
import pytest
from numpy import nan
import pandas as pd
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas import DataFrame, Index, MultiIndex, Series
from pandas.compat import u, range, map, BytesIO, iteritems, PY36
from pandas.core.config import set_option, get_option
from pandas.io.common import URLError
from pandas.io.excel import (
ExcelFile, ExcelWriter, read_excel, _XlwtWriter, _OpenpyxlWriter,
register_writer, _XlsxWriter
)
from pandas.io.formats.excel import ExcelFormatter
from pandas.io.parsers import read_csv
from pandas.util.testing import ensure_clean, makeCustomDataframe as mkdf
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)[:10]
_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])[:10]
_tsframe = tm.makeTimeDataFrame()[:5]
_mixed_frame = _frame.copy()
_mixed_frame['foo'] = 'bar'
@td.skip_if_no('xlrd', '0.9')
class SharedItems(object):
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
self.dirpath = datapath("io", "data")
self.frame = _frame.copy()
self.frame2 = _frame2.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
def get_csv_refdf(self, basename):
"""
Obtain the reference data from read_csv with the Python engine.
Parameters
----------
basename : str
File base name, excluding file extension.
Returns
-------
dfref : DataFrame
"""
pref = os.path.join(self.dirpath, basename + '.csv')
dfref = read_csv(pref, index_col=0, parse_dates=True, engine='python')
return dfref
def get_excelfile(self, basename, ext):
"""
Return test data ExcelFile instance.
Parameters
----------
basename : str
File base name, excluding file extension.
Returns
-------
excel : io.excel.ExcelFile
"""
return ExcelFile(os.path.join(self.dirpath, basename + ext))
def get_exceldf(self, basename, ext, *args, **kwds):
"""
Return test data DataFrame.
Parameters
----------
basename : str
File base name, excluding file extension.
Returns
-------
df : DataFrame
"""
pth = os.path.join(self.dirpath, basename + ext)
return read_excel(pth, *args, **kwds)
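# Illustrative pairing of the helpers above (not part of the upstream test
# suite): a typical reading test below combines them along the lines of
# expected = self.get_csv_refdf('test1')
# actual = self.get_exceldf('test1', ext, 'Sheet1', index_col=0)
# tm.assert_frame_equal(actual, expected, check_names=False)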
class ReadingTestsBase(SharedItems):
# This is based on ExcelWriterBase
@td.skip_if_no("xlrd", "1.0.1") # see gh-22682
def test_usecols_int(self, ext):
df_ref = self.get_csv_refdf("test1")
df_ref = df_ref.reindex(columns=["A", "B", "C"])
# usecols as int
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
df1 = self.get_exceldf("test1", ext, "Sheet1",
index_col=0, usecols=3)
# usecols as int
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
df2 = self.get_exceldf("test1", ext, "Sheet2", skiprows=[1],
index_col=0, usecols=3)
# parse_cols instead of usecols, usecols as int
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
df3 = self.get_exceldf("test1", ext, "Sheet2", skiprows=[1],
index_col=0, parse_cols=3)
# TODO add index to xls file
tm.assert_frame_equal(df1, df_ref, check_names=False)
tm.assert_frame_equal(df2, df_ref, check_names=False)
tm.assert_frame_equal(df3, df_ref, check_names=False)
@td.skip_if_no('xlrd', '1.0.1') # GH-22682
def test_usecols_list(self, ext):
dfref = self.get_csv_refdf('test1')
dfref = dfref.reindex(columns=['B', 'C'])
df1 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
usecols=[0, 2, 3])
df2 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
index_col=0, usecols=[0, 2, 3])
with tm.assert_produces_warning(FutureWarning):
df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
index_col=0, parse_cols=[0, 2, 3])
# TODO add index to xls file
tm.assert_frame_equal(df1, dfref, check_names=False)
tm.assert_frame_equal(df2, dfref, check_names=False)
tm.assert_frame_equal(df3, dfref, check_names=False)
@td.skip_if_no('xlrd', '1.0.1') # GH-22682
def test_usecols_str(self, ext):
dfref = self.get_csv_refdf('test1')
df1 = dfref.reindex(columns=['A', 'B', 'C'])
df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
usecols='A:D')
df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
index_col=0, usecols='A:D')
with tm.assert_produces_warning(FutureWarning):
df4 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
index_col=0, parse_cols='A:D')
# TODO add index to xls, read xls ignores index name ?
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
tm.assert_frame_equal(df4, df1, check_names=False)
df1 = dfref.reindex(columns=['B', 'C'])
df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
usecols='A,C,D')
df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
index_col=0, usecols='A,C,D')
# TODO add index to xls file
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
df1 = dfref.reindex(columns=['B', 'C'])
df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
usecols='A,C:D')
df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
index_col=0, usecols='A,C:D')
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
@pytest.mark.parametrize("usecols", [
[0, 1, 3], [0, 3, 1],
[1, 0, 3], [1, 3, 0],
[3, 0, 1], [3, 1, 0],
])
def test_usecols_diff_positional_int_columns_order(self, ext, usecols):
expected = self.get_csv_refdf("test1")[["A", "C"]]
result = self.get_exceldf("test1", ext, "Sheet1",
index_col=0, usecols=usecols)
tm.assert_frame_equal(result, expected, check_names=False)
@pytest.mark.parametrize("usecols", [
["B", "D"], ["D", "B"]
])
def test_usecols_diff_positional_str_columns_order(self, ext, usecols):
expected = self.get_csv_refdf("test1")[["B", "D"]]
expected.index = range(len(expected))
result = self.get_exceldf("test1", ext, "Sheet1", usecols=usecols)
tm.assert_frame_equal(result, expected, check_names=False)
def test_read_excel_without_slicing(self, ext):
expected = self.get_csv_refdf("test1")
result = self.get_exceldf("test1", ext, "Sheet1", index_col=0)
tm.assert_frame_equal(result, expected, check_names=False)
def test_usecols_excel_range_str(self, ext):
expected = self.get_csv_refdf("test1")[["C", "D"]]
result = self.get_exceldf("test1", ext, "Sheet1",
index_col=0, usecols="A,D:E")
tm.assert_frame_equal(result, expected, check_names=False)
def test_usecols_excel_range_str_invalid(self, ext):
msg = "Invalid column name: E1"
with pytest.raises(ValueError, match=msg):
self.get_exceldf("test1", ext, "Sheet1", usecols="D:E1")
def test_index_col_label_error(self, ext):
msg = "list indices must be integers.*, not str"
with pytest.raises(TypeError, match=msg):
self.get_exceldf("test1", ext, "Sheet1", index_col=["A"],
usecols=["A", "C"])
def test_usecols_pass_non_existent_column(self, ext):
msg = ("Usecols do not match columns, "
"columns expected but not found: " + r"\['E'\]")
with pytest.raises(ValueError, match=msg):
self.get_exceldf("test1", ext, usecols=["E"])
def test_usecols_wrong_type(self, ext):
msg = ("'usecols' must either be list-like of "
"all strings, all unicode, all integers or a callable.")
with pytest.raises(ValueError, match=msg):
self.get_exceldf("test1", ext, usecols=["E1", 0])
def test_excel_stop_iterator(self, ext):
parsed = self.get_exceldf('test2', ext, 'Sheet1')
expected = DataFrame([['aaaa', 'bbbbb']], columns=['Test', 'Test1'])
tm.assert_frame_equal(parsed, expected)
def test_excel_cell_error_na(self, ext):
parsed = self.get_exceldf('test3', ext, 'Sheet1')
expected = DataFrame([[np.nan]], columns=['Test'])
tm.assert_frame_equal(parsed, expected)
def test_excel_passes_na(self, ext):
excel = self.get_excelfile('test4', ext)
parsed = read_excel(excel, 'Sheet1', keep_default_na=False,
na_values=['apple'])
expected = DataFrame([['NA'], [1], ['NA'], [np.nan], ['rabbit']],
columns=['Test'])
tm.assert_frame_equal(parsed, expected)
parsed = read_excel(excel, 'Sheet1', keep_default_na=True,
na_values=['apple'])
expected = DataFrame([[np.nan], [1], [np.nan], [np.nan], ['rabbit']],
columns=['Test'])
tm.assert_frame_equal(parsed, expected)
# 13967
excel = self.get_excelfile('test5', ext)
parsed = read_excel(excel, 'Sheet1', keep_default_na=False,
na_values=['apple'])
expected = DataFrame([['1.#QNAN'], [1], ['nan'], [np.nan], ['rabbit']],
columns=['Test'])
tm.assert_frame_equal(parsed, expected)
parsed = read_excel(excel, 'Sheet1', keep_default_na=True,
na_values=['apple'])
expected = DataFrame([[np.nan], [1], [np.nan], [np.nan], ['rabbit']],
columns=['Test'])
tm.assert_frame_equal(parsed, expected)
@td.skip_if_no('xlrd', '1.0.1') # GH-22682
def test_deprecated_sheetname(self, ext):
# gh-17964
excel = self.get_excelfile('test1', ext)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
read_excel(excel, sheetname='Sheet1')
with pytest.raises(TypeError):
read_excel(excel, sheet='Sheet1')
@td.skip_if_no('xlrd', '1.0.1') # GH-22682
def test_excel_table_sheet_by_index(self, ext):
excel = self.get_excelfile('test1', ext)
dfref = self.get_csv_refdf('test1')
df1 = read_excel(excel, 0, index_col=0)
df2 = read_excel(excel, 1, skiprows=[1], index_col=0)
tm.assert_frame_equal(df1, dfref, check_names=False)
tm.assert_frame_equal(df2, dfref, check_names=False)
df1 = excel.parse(0, index_col=0)
df2 = excel.parse(1, skiprows=[1], index_col=0)
tm.assert_frame_equal(df1, dfref, check_names=False)
tm.assert_frame_equal(df2, dfref, check_names=False)
df3 = read_excel(excel, 0, index_col=0, skipfooter=1)
tm.assert_frame_equal(df3, df1.iloc[:-1])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
df4 = read_excel(excel, 0, index_col=0, skip_footer=1)
tm.assert_frame_equal(df3, df4)
df3 = excel.parse(0, index_col=0, skipfooter=1)
tm.assert_frame_equal(df3, df1.iloc[:-1])
import xlrd
with pytest.raises(xlrd.XLRDError):
read_excel(excel, 'asdf')
def test_excel_table(self, ext):
dfref = self.get_csv_refdf('test1')
df1 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0)
df2 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
index_col=0)
# TODO add index to file
tm.assert_frame_equal(df1, dfref, check_names=False)
tm.assert_frame_equal(df2, dfref, check_names=False)
df3 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
skipfooter=1)
tm.assert_frame_equal(df3, df1.iloc[:-1])
def test_reader_special_dtypes(self, ext):
expected = DataFrame.from_dict(OrderedDict([
("IntCol", [1, 2, -3, 4, 0]),
("FloatCol", [1.25, 2.25, 1.83, 1.92, 0.0000000005]),
("BoolCol", [True, False, True, True, False]),
("StrCol", [1, 2, 3, 4, 5]),
# GH5394 - this is why convert_float isn't vectorized
("Str2Col", ["a", 3, "c", "d", "e"]),
("DateCol", [datetime(2013, 10, 30), datetime(2013, 10, 31),
datetime(1905, 1, 1), datetime(2013, 12, 14),
datetime(2015, 3, 14)])
]))
basename = 'test_types'
# should read in correctly and infer types
actual = self.get_exceldf(basename, ext, 'Sheet1')
tm.assert_frame_equal(actual, expected)
# if not coercing number, then int comes in as float
float_expected = expected.copy()
float_expected["IntCol"] = float_expected["IntCol"].astype(float)
float_expected.loc[float_expected.index[1], "Str2Col"] = 3.0
actual = self.get_exceldf(basename, ext, 'Sheet1', convert_float=False)
tm.assert_frame_equal(actual, float_expected)
# check setting Index (assuming xls and xlsx are the same here)
for icol, name in enumerate(expected.columns):
actual = self.get_exceldf(basename, ext, 'Sheet1', index_col=icol)
exp = expected.set_index(name)
tm.assert_frame_equal(actual, exp)
# convert_float and converters should be different but both accepted
expected["StrCol"] = expected["StrCol"].apply(str)
actual = self.get_exceldf(
basename, ext, 'Sheet1', converters={"StrCol": str})
tm.assert_frame_equal(actual, expected)
no_convert_float = float_expected.copy()
no_convert_float["StrCol"] = no_convert_float["StrCol"].apply(str)
actual = self.get_exceldf(basename, ext, 'Sheet1', convert_float=False,
converters={"StrCol": str})
tm.assert_frame_equal(actual, no_convert_float)
# GH8212 - support for converters and missing values
def test_reader_converters(self, ext):
basename = 'test_converters'
expected = DataFrame.from_dict(OrderedDict([
("IntCol", [1, 2, -3, -1000, 0]),
("FloatCol", [12.5, np.nan, 18.3, 19.2, 0.000000005]),
("BoolCol", ['Found', 'Found', 'Found', 'Not found', 'Found']),
("StrCol", ['1', np.nan, '3', '4', '5']),
]))
converters = {'IntCol': lambda x: int(x) if x != '' else -1000,
'FloatCol': lambda x: 10 * x if x else np.nan,
2: lambda x: 'Found' if x != '' else 'Not found',
3: lambda x: str(x) if x else '',
}
# should read in correctly and set types of single cells (not array
# dtypes)
actual = self.get_exceldf(basename, ext, 'Sheet1',
converters=converters)
tm.assert_frame_equal(actual, expected)
def test_reader_dtype(self, ext):
# GH 8212
basename = 'testdtype'
actual = self.get_exceldf(basename, ext)
expected = DataFrame({
'a': [1, 2, 3, 4],
'b': [2.5, 3.5, 4.5, 5.5],
'c': [1, 2, 3, 4],
'd': [1.0, 2.0, np.nan, 4.0]}).reindex(
columns=['a', 'b', 'c', 'd'])
tm.assert_frame_equal(actual, expected)
actual = self.get_exceldf(basename, ext,
dtype={'a': 'float64',
'b': 'float32',
'c': str})
expected['a'] = expected['a'].astype('float64')
expected['b'] = expected['b'].astype('float32')
expected['c'] = ['001', '002', '003', '004']
tm.assert_frame_equal(actual, expected)
with pytest.raises(ValueError):
self.get_exceldf(basename, ext, dtype={'d': 'int64'})
@pytest.mark.parametrize("dtype,expected", [
(None,
DataFrame({
"a": [1, 2, 3, 4],
"b": [2.5, 3.5, 4.5, 5.5],
"c": [1, 2, 3, 4],
"d": [1.0, 2.0, np.nan, 4.0]
})),
({"a": "float64",
"b": "float32",
"c": str,
"d": str
},
DataFrame({
"a": Series([1, 2, 3, 4], dtype="float64"),
"b": Series([2.5, 3.5, 4.5, 5.5], dtype="float32"),
"c": ["001", "002", "003", "004"],
"d": ["1", "2", np.nan, "4"]
})),
])
def test_reader_dtype_str(self, ext, dtype, expected):
# see gh-20377
basename = "testdtype"
actual = self.get_exceldf(basename, ext, dtype=dtype)
tm.assert_frame_equal(actual, expected)
def test_reading_all_sheets(self, ext):
# Test reading all sheetnames by setting sheetname to None,
# Ensure a dict is returned.
# See PR #9450
basename = 'test_multisheet'
dfs = self.get_exceldf(basename, ext, sheet_name=None)
# ensure this is not alphabetical to test order preservation
expected_keys = ['Charlie', 'Alpha', 'Beta']
tm.assert_contains_all(expected_keys, dfs.keys())
# Issue 9930
# Ensure sheet order is preserved
assert expected_keys == list(dfs.keys())
def test_reading_multiple_specific_sheets(self, ext):
# Test reading specific sheetnames by specifying a mixed list
# of integers and strings, and confirm that duplicated sheet
# references (positions/names) are removed properly.
# Ensure a dict is returned
# See PR #9450
basename = 'test_multisheet'
# Explicitly request duplicates. Only the set should be returned.
expected_keys = [2, 'Charlie', 'Charlie']
dfs = self.get_exceldf(basename, ext, sheet_name=expected_keys)
expected_keys = list(set(expected_keys))
tm.assert_contains_all(expected_keys, dfs.keys())
assert len(expected_keys) == len(dfs.keys())
def test_reading_all_sheets_with_blank(self, ext):
# Test reading all sheetnames by setting sheetname to None,
# In the case where some sheets are blank.
# Issue #11711
basename = 'blank_with_header'
dfs = self.get_exceldf(basename, ext, sheet_name=None)
expected_keys = ['Sheet1', 'Sheet2', 'Sheet3']
tm.assert_contains_all(expected_keys, dfs.keys())
# GH6403
def test_read_excel_blank(self, ext):
actual = self.get_exceldf('blank', ext, 'Sheet1')
tm.assert_frame_equal(actual, DataFrame())
def test_read_excel_blank_with_header(self, ext):
expected = DataFrame(columns=['col_1', 'col_2'])
actual = self.get_exceldf('blank_with_header', ext, 'Sheet1')
tm.assert_frame_equal(actual, expected)
@td.skip_if_no("xlwt")
@td.skip_if_no("openpyxl")
@pytest.mark.parametrize("header,expected", [
(None, DataFrame([np.nan] * 4)),
(0, DataFrame({"Unnamed: 0": [np.nan] * 3}))
])
def test_read_one_empty_col_no_header(self, ext, header, expected):
# xref gh-12292
filename = "no_header"
df = pd.DataFrame(
[["", 1, 100],
["", 2, 200],
["", 3, 300],
["", 4, 400]]
)
with ensure_clean(ext) as path:
df.to_excel(path, filename, index=False, header=False)
result = read_excel(path, filename, usecols=[0], header=header)
tm.assert_frame_equal(result, expected)
@td.skip_if_no("xlwt")
@td.skip_if_no("openpyxl")
@pytest.mark.parametrize("header,expected", [
(None, DataFrame([0] + [np.nan] * 4)),
(0, DataFrame([np.nan] * 4))
])
def test_read_one_empty_col_with_header(self, ext, header, expected):
filename = "with_header"
df = pd.DataFrame(
[["", 1, 100],
["", 2, 200],
["", 3, 300],
["", 4, 400]]
)
with ensure_clean(ext) as path:
df.to_excel(path, 'with_header', index=False, header=True)
result = read_excel(path, filename, usecols=[0], header=header)
tm.assert_frame_equal(result, expected)
@td.skip_if_no('openpyxl')
@td.skip_if_no('xlwt')
def test_set_column_names_in_parameter(self, ext):
# GH 12870 : pass down column names associated with
# keyword argument names
refdf = pd.DataFrame([[1, 'foo'], [2, 'bar'],
[3, 'baz']], columns=['a', 'b'])
with ensure_clean(ext) as pth:
with ExcelWriter(pth) as writer:
refdf.to_excel(writer, 'Data_no_head',
header=False, index=False)
refdf.to_excel(writer, 'Data_with_head', index=False)
refdf.columns = ['A', 'B']
with ExcelFile(pth) as reader:
xlsdf_no_head = read_excel(reader, 'Data_no_head',
header=None, names=['A', 'B'])
xlsdf_with_head = read_excel(reader, 'Data_with_head',
index_col=None, names=['A', 'B'])
tm.assert_frame_equal(xlsdf_no_head, refdf)
tm.assert_frame_equal(xlsdf_with_head, refdf)
def test_date_conversion_overflow(self, ext):
# GH 10001 : pandas.ExcelFile ignore parse_dates=False
expected = pd.DataFrame([[pd.Timestamp('2016-03-12'), 'Marc Johnson'],
[pd.Timestamp('2016-03-16'), 'Jack Black'],
[1e+20, 'Timothy Brown']],
columns=['DateColWithBigInt', 'StringCol'])
result = self.get_exceldf('testdateoverflow', ext)
tm.assert_frame_equal(result, expected)
@td.skip_if_no("xlrd", "1.0.1") # see gh-22682
def test_sheet_name_and_sheetname(self, ext):
# gh-10559: Minor improvement: Change "sheet_name" to "sheetname"
# gh-10969: DOC: Consistent var names (sheetname vs sheet_name)
# gh-12604: CLN GH10559 Rename sheetname variable to sheet_name
# gh-20920: ExcelFile.parse() and pd.read_xlsx() have different
# behavior for "sheetname" argument
filename = "test1"
sheet_name = "Sheet1"
df_ref = self.get_csv_refdf(filename)
df1 = self.get_exceldf(filename, ext,
sheet_name=sheet_name, index_col=0) # doc
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
df2 = self.get_exceldf(filename, ext, index_col=0,
sheetname=sheet_name) # backward compat
excel = self.get_excelfile(filename, ext)
df1_parse = excel.parse(sheet_name=sheet_name, index_col=0) # doc
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
df2_parse = excel.parse(index_col=0,
sheetname=sheet_name) # backward compat
tm.assert_frame_equal(df1, df_ref, check_names=False)
tm.assert_frame_equal(df2, df_ref, check_names=False)
tm.assert_frame_equal(df1_parse, df_ref, check_names=False)
tm.assert_frame_equal(df2_parse, df_ref, check_names=False)
def test_sheet_name_both_raises(self, ext):
with pytest.raises(TypeError, match="Cannot specify both"):
self.get_exceldf('test1', ext, sheetname='Sheet1',
sheet_name='Sheet1')
excel = self.get_excelfile('test1', ext)
with pytest.raises(TypeError, match="Cannot specify both"):
excel.parse(sheetname='Sheet1',
sheet_name='Sheet1')
@pytest.mark.parametrize("ext", ['.xls', '.xlsx', '.xlsm'])
class TestXlrdReader(ReadingTestsBase):
"""
This is the base class for the xlrd tests, and 3 different file formats
are supported: xls, xlsx, xlsm
"""
def test_excel_read_buffer(self, ext):
pth = os.path.join(self.dirpath, 'test1' + ext)
expected = read_excel(pth, 'Sheet1', index_col=0)
with open(pth, 'rb') as f:
actual = read_excel(f, 'Sheet1', index_col=0)
tm.assert_frame_equal(expected, actual)
with open(pth, 'rb') as f:
xls = ExcelFile(f)
actual = read_excel(xls, 'Sheet1', index_col=0)
tm.assert_frame_equal(expected, actual)
@td.skip_if_no("xlwt")
def test_read_xlrd_book(self, ext):
import xlrd
df = self.frame
engine = "xlrd"
sheet_name = "SheetA"
with ensure_clean(ext) as pth:
df.to_excel(pth, sheet_name)
book = xlrd.open_workbook(pth)
with ExcelFile(book, engine=engine) as xl:
result = read_excel(xl, sheet_name, index_col=0)
tm.assert_frame_equal(df, result)
result = read_excel(book, sheet_name=sheet_name,
engine=engine, index_col=0)
tm.assert_frame_equal(df, result)
@tm.network
def test_read_from_http_url(self, ext):
url = ('https://raw.github.com/pandas-dev/pandas/master/'
'pandas/tests/io/data/test1' + ext)
url_table = read_excel(url)
local_table = self.get_exceldf('test1', ext)
tm.assert_frame_equal(url_table, local_table)
@td.skip_if_no("s3fs")
@td.skip_if_not_us_locale
def test_read_from_s3_url(self, ext):
moto = pytest.importorskip("moto")
boto3 = pytest.importorskip("boto3")
with moto.mock_s3():
conn = boto3.resource("s3", region_name="us-east-1")
conn.create_bucket(Bucket="pandas-test")
file_name = os.path.join(self.dirpath, 'test1' + ext)
with open(file_name, "rb") as f:
conn.Bucket("pandas-test").put_object(Key="test1" + ext,
Body=f)
url = ('s3://pandas-test/test1' + ext)
url_table = read_excel(url)
local_table = self.get_exceldf('test1', ext)
tm.assert_frame_equal(url_table, local_table)
@pytest.mark.slow
# ignore warning from old xlrd
@pytest.mark.filterwarnings("ignore:This metho:PendingDeprecationWarning")
def test_read_from_file_url(self, ext):
# FILE
localtable = os.path.join(self.dirpath, 'test1' + ext)
local_table = read_excel(localtable)
try:
url_table = read_excel('file://localhost/' + localtable)
except URLError:
# fails on some systems
import platform
pytest.skip("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
@td.skip_if_no('pathlib')
def test_read_from_pathlib_path(self, ext):
# GH12655
from pathlib import Path
str_path = os.path.join(self.dirpath, 'test1' + ext)
expected = read_excel(str_path, 'Sheet1', index_col=0)
path_obj = Path(self.dirpath, 'test1' + ext)
actual = read_excel(path_obj, 'Sheet1', index_col=0)
tm.assert_frame_equal(expected, actual)
@td.skip_if_no('py.path')
def test_read_from_py_localpath(self, ext):
# GH12655
from py.path import local as LocalPath
str_path = os.path.join(self.dirpath, 'test1' + ext)
expected = read_excel(str_path, 'Sheet1', index_col=0)
abs_dir = os.path.abspath(self.dirpath)
path_obj = LocalPath(abs_dir).join('test1' + ext)
actual = read_excel(path_obj, 'Sheet1', index_col=0)
tm.assert_frame_equal(expected, actual)
def test_reader_closes_file(self, ext):
pth = os.path.join(self.dirpath, 'test1' + ext)
f = open(pth, 'rb')
with ExcelFile(f) as xlsx:
# parses okay
read_excel(xlsx, 'Sheet1', index_col=0)
assert f.closed
@td.skip_if_no("xlwt")
@td.skip_if_no("openpyxl")
def test_creating_and_reading_multiple_sheets(self, ext):
# see gh-9450
#
# Test reading multiple sheets, from a runtime
# created Excel file with multiple sheets.
def tdf(col_sheet_name):
d, i = [11, 22, 33], [1, 2, 3]
return DataFrame(d, i, columns=[col_sheet_name])
sheets = ["AAA", "BBB", "CCC"]
dfs = [tdf(s) for s in sheets]
dfs = dict(zip(sheets, dfs))
with ensure_clean(ext) as pth:
with ExcelWriter(pth) as ew:
for sheetname, df in iteritems(dfs):
df.to_excel(ew, sheetname)
dfs_returned = read_excel(pth, sheet_name=sheets, index_col=0)
for s in sheets:
tm.assert_frame_equal(dfs[s], dfs_returned[s])
def test_reader_seconds(self, ext):
import xlrd
# Test reading times with and without milliseconds. GH5945.
if LooseVersion(xlrd.__VERSION__) >= LooseVersion("0.9.3"):
# Xlrd >= 0.9.3 can handle Excel milliseconds.
expected = DataFrame.from_dict({"Time": [time(1, 2, 3),
time(2, 45, 56, 100000),
time(4, 29, 49, 200000),
time(6, 13, 42, 300000),
time(7, 57, 35, 400000),
time(9, 41, 28, 500000),
time(11, 25, 21, 600000),
time(13, 9, 14, 700000),
time(14, 53, 7, 800000),
time(16, 37, 0, 900000),
time(18, 20, 54)]})
else:
# Xlrd < 0.9.3 rounds Excel milliseconds.
expected = DataFrame.from_dict({"Time": [time(1, 2, 3),
time(2, 45, 56),
time(4, 29, 49),
time(6, 13, 42),
time(7, 57, 35),
time(9, 41, 29),
time(11, 25, 22),
time(13, 9, 15),
time(14, 53, 8),
time(16, 37, 1),
time(18, 20, 54)]})
actual = self.get_exceldf('times_1900', ext, 'Sheet1')
tm.assert_frame_equal(actual, expected)
actual = self.get_exceldf('times_1904', ext, 'Sheet1')
tm.assert_frame_equal(actual, expected)
def test_read_excel_multiindex(self, ext):
# see gh-4679
mi = MultiIndex.from_product([["foo", "bar"], ["a", "b"]])
mi_file = os.path.join(self.dirpath, "testmultiindex" + ext)
# "mi_column" sheet
expected = DataFrame([[1, 2.5, pd.Timestamp("2015-01-01"), True],
[2, 3.5, pd.Timestamp("2015-01-02"), False],
[3, 4.5, pd.Timestamp("2015-01-03"), False],
[4, 5.5, pd.Timestamp("2015-01-04"), True]],
columns=mi)
actual = read_excel(mi_file, "mi_column", header=[0, 1], index_col=0)
tm.assert_frame_equal(actual, expected)
# "mi_index" sheet
expected.index = mi
expected.columns = ["a", "b", "c", "d"]
actual = read_excel(mi_file, "mi_index", index_col=[0, 1])
tm.assert_frame_equal(actual, expected, check_names=False)
# "both" sheet
expected.columns = mi
actual = read_excel(mi_file, "both", index_col=[0, 1], header=[0, 1])
tm.assert_frame_equal(actual, expected, check_names=False)
# "mi_index_name" sheet
expected.columns = ["a", "b", "c", "d"]
expected.index = mi.set_names(["ilvl1", "ilvl2"])
actual = read_excel(mi_file, "mi_index_name", index_col=[0, 1])
tm.assert_frame_equal(actual, expected)
# "mi_column_name" sheet
expected.index = list(range(4))
expected.columns = mi.set_names(["c1", "c2"])
actual = read_excel(mi_file, "mi_column_name",
header=[0, 1], index_col=0)
tm.assert_frame_equal(actual, expected)
# see gh-11317
# "name_with_int" sheet
expected.columns = mi.set_levels(
[1, 2], level=1).set_names(["c1", "c2"])
actual = read_excel(mi_file, "name_with_int",
index_col=0, header=[0, 1])
tm.assert_frame_equal(actual, expected)
# "both_name" sheet
expected.columns = mi.set_names(["c1", "c2"])
expected.index = mi.set_names(["ilvl1", "ilvl2"])
actual = read_excel(mi_file, "both_name",
index_col=[0, 1], header=[0, 1])
tm.assert_frame_equal(actual, expected)
# "both_skiprows" sheet
actual = read_excel(mi_file, "both_name_skiprows", index_col=[0, 1],
header=[0, 1], skiprows=2)
tm.assert_frame_equal(actual, expected)
@td.skip_if_no("xlsxwriter")
def test_read_excel_multiindex_empty_level(self, ext):
# see gh-12453
with ensure_clean(ext) as path:
df = DataFrame({
("One", "x"): {0: 1},
("Two", "X"): {0: 3},
("Two", "Y"): {0: 7},
("Zero", ""): {0: 0}
})
expected = DataFrame({
("One", u"x"): {0: 1},
("Two", u"X"): {0: 3},
("Two", u"Y"): {0: 7},
("Zero", "Unnamed: 4_level_1"): {0: 0}
})
df.to_excel(path)
actual = pd.read_excel(path, header=[0, 1], index_col=0)
tm.assert_frame_equal(actual, expected)
df = pd.DataFrame({
("Beg", ""): {0: 0},
("Middle", "x"): {0: 1},
("Tail", "X"): {0: 3},
("Tail", "Y"): {0: 7}
})
expected = pd.DataFrame({
("Beg", "Unnamed: 1_level_1"): {0: 0},
("Middle", u"x"): {0: 1},
("Tail", u"X"): {0: 3},
("Tail", u"Y"): {0: 7}
})
df.to_excel(path)
actual = pd.read_excel(path, header=[0, 1], index_col=0)
tm.assert_frame_equal(actual, expected)
@td.skip_if_no("xlsxwriter")
@pytest.mark.parametrize("c_idx_names", [True, False])
@pytest.mark.parametrize("r_idx_names", [True, False])
@pytest.mark.parametrize("c_idx_levels", [1, 3])
@pytest.mark.parametrize("r_idx_levels", [1, 3])
def test_excel_multindex_roundtrip(self, ext, c_idx_names, r_idx_names,
c_idx_levels, r_idx_levels):
# see gh-4679
with ensure_clean(ext) as pth:
if c_idx_levels == 1 and c_idx_names:
pytest.skip("Column index name cannot be "
"serialized unless it's a MultiIndex")
# The empty-name case is currently read in as
# unnamed levels, not Nones.
check_names = r_idx_names or r_idx_levels <= 1
df = mkdf(5, 5, c_idx_names, r_idx_names,
c_idx_levels, r_idx_levels)
df.to_excel(pth)
act = pd.read_excel(pth, index_col=list(range(r_idx_levels)),
header=list(range(c_idx_levels)))
tm.assert_frame_equal(df, act, check_names=check_names)
df.iloc[0, :] = np.nan
df.to_excel(pth)
act = pd.read_excel(pth, index_col=list(range(r_idx_levels)),
header=list(range(c_idx_levels)))
tm.assert_frame_equal(df, act, check_names=check_names)
df.iloc[-1, :] = np.nan
df.to_excel(pth)
act = pd.read_excel(pth, index_col=list(range(r_idx_levels)),
header=list(range(c_idx_levels)))
tm.assert_frame_equal(df, act, check_names=check_names)
def test_excel_old_index_format(self, ext):
# see gh-4679
filename = "test_index_name_pre17" + ext
in_file = os.path.join(self.dirpath, filename)
# We detect headers to determine if index names exist, so
# that "index" name in the "names" version of the data will
# now be interpreted as rows that include null data.
data = np.array([[None, None, None, None, None],
["R0C0", "R0C1", "R0C2", "R0C3", "R0C4"],
["R1C0", "R1C1", "R1C2", "R1C3", "R1C4"],
["R2C0", "R2C1", "R2C2", "R2C3", "R2C4"],
["R3C0", "R3C1", "R3C2", "R3C3", "R3C4"],
["R4C0", "R4C1", "R4C2", "R4C3", "R4C4"]])
columns = ["C_l0_g0", "C_l0_g1", "C_l0_g2", "C_l0_g3", "C_l0_g4"]
mi = MultiIndex(levels=[["R0", "R_l0_g0", "R_l0_g1",
"R_l0_g2", "R_l0_g3", "R_l0_g4"],
["R1", "R_l1_g0", "R_l1_g1",
"R_l1_g2", "R_l1_g3", "R_l1_g4"]],
labels=[[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]],
names=[None, None])
si = Index(["R0", "R_l0_g0", "R_l0_g1", "R_l0_g2",
"R_l0_g3", "R_l0_g4"], name=None)
expected = pd.DataFrame(data, index=si, columns=columns)
actual = pd.read_excel(in_file, "single_names", index_col=0)
tm.assert_frame_equal(actual, expected)
expected.index = mi
actual = pd.read_excel(in_file, "multi_names", index_col=[0, 1])
tm.assert_frame_equal(actual, expected)
# The analogous versions of the "names" version data
# where there are explicitly no names for the indices.
data = np.array([["R0C0", "R0C1", "R0C2", "R0C3", "R0C4"],
["R1C0", "R1C1", "R1C2", "R1C3", "R1C4"],
["R2C0", "R2C1", "R2C2", "R2C3", "R2C4"],
["R3C0", "R3C1", "R3C2", "R3C3", "R3C4"],
["R4C0", "R4C1", "R4C2", "R4C3", "R4C4"]])
columns = ["C_l0_g0", "C_l0_g1", "C_l0_g2", "C_l0_g3", "C_l0_g4"]
mi = MultiIndex(levels=[["R_l0_g0", "R_l0_g1", "R_l0_g2",
"R_l0_g3", "R_l0_g4"],
["R_l1_g0", "R_l1_g1", "R_l1_g2",
"R_l1_g3", "R_l1_g4"]],
labels=[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]],
names=[None, None])
si = Index(["R_l0_g0", "R_l0_g1", "R_l0_g2",
"R_l0_g3", "R_l0_g4"], name=None)
expected = pd.DataFrame(data, index=si, columns=columns)
actual = pd.read_excel(in_file, "single_no_names", index_col=0)
tm.assert_frame_equal(actual, expected)
expected.index = mi
actual = pd.read_excel(in_file, "multi_no_names", index_col=[0, 1])
tm.assert_frame_equal(actual, expected, check_names=False)
def test_read_excel_bool_header_arg(self, ext):
# GH 6114
for arg in [True, False]:
with pytest.raises(TypeError):
pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
header=arg)
def test_read_excel_chunksize(self, ext):
# GH 8011
with pytest.raises(NotImplementedError):
pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
chunksize=100)
@td.skip_if_no("xlwt")
@td.skip_if_no("openpyxl")
def test_read_excel_parse_dates(self, ext):
# see gh-11544, gh-12051
df = DataFrame(
{"col": [1, 2, 3],
"date_strings": pd.date_range("2012-01-01", periods=3)})
df2 = df.copy()
df2["date_strings"] = df2["date_strings"].dt.strftime("%m/%d/%Y")
with ensure_clean(ext) as pth:
df2.to_excel(pth)
res = read_excel(pth, index_col=0)
tm.assert_frame_equal(df2, res)
res = read_excel(pth, parse_dates=["date_strings"], index_col=0)
tm.assert_frame_equal(df, res)
date_parser = lambda x: pd.datetime.strptime(x, "%m/%d/%Y")
res = read_excel(pth, parse_dates=["date_strings"],
date_parser=date_parser, index_col=0)
tm.assert_frame_equal(df, res)
def test_read_excel_skiprows_list(self, ext):
# GH 4903
actual = pd.read_excel(os.path.join(self.dirpath,
'testskiprows' + ext),
'skiprows_list', skiprows=[0, 2])
expected = DataFrame([[1, 2.5, pd.Timestamp('2015-01-01'), True],
[2, 3.5, pd.Timestamp('2015-01-02'), False],
[3, 4.5, pd.Timestamp('2015-01-03'), False],
[4, 5.5, pd.Timestamp('2015-01-04'), True]],
columns=['a', 'b', 'c', 'd'])
tm.assert_frame_equal(actual, expected)
actual = pd.read_excel(os.path.join(self.dirpath,
'testskiprows' + ext),
'skiprows_list', skiprows=np.array([0, 2]))
tm.assert_frame_equal(actual, expected)
def test_read_excel_nrows(self, ext):
# GH 16645
num_rows_to_pull = 5
actual = pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
nrows=num_rows_to_pull)
expected = pd.read_excel(os.path.join(self.dirpath,
'test1' + ext))
expected = expected[:num_rows_to_pull]
tm.assert_frame_equal(actual, expected)
def test_read_excel_nrows_greater_than_nrows_in_file(self, ext):
# GH 16645
expected = pd.read_excel(os.path.join(self.dirpath,
'test1' + ext))
num_records_in_file = len(expected)
num_rows_to_pull = num_records_in_file + 10
actual = pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
nrows=num_rows_to_pull)
tm.assert_frame_equal(actual, expected)
def test_read_excel_nrows_non_integer_parameter(self, ext):
# GH 16645
msg = "'nrows' must be an integer >=0"
with pytest.raises(ValueError, match=msg):
pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
nrows='5')
def test_read_excel_squeeze(self, ext):
# GH 12157
f = os.path.join(self.dirpath, 'test_squeeze' + ext)
actual = pd.read_excel(f, 'two_columns', index_col=0, squeeze=True)
expected = pd.Series([2, 3, 4], [4, 5, 6], name='b')
expected.index.name = 'a'
tm.assert_series_equal(actual, expected)
actual = pd.read_excel(f, 'two_columns', squeeze=True)
expected = pd.DataFrame({'a': [4, 5, 6],
'b': [2, 3, 4]})
tm.assert_frame_equal(actual, expected)
actual = pd.read_excel(f, 'one_column', squeeze=True)
expected = pd.Series([1, 2, 3], name='a')
tm.assert_series_equal(actual, expected)
class _WriterBase(SharedItems):
@pytest.fixture(autouse=True)
def set_engine_and_path(self, request, merge_cells, engine, ext):
"""Fixture to set engine and open file for use in each test case
Rather than requiring `engine=...` to be provided explicitly as an
argument in each test, this fixture sets a global option to dictate
which engine should be used to write Excel files. After executing
the test it rolls back said change to the global option.
It also uses a context manager to open a temporary excel file for
the function to write to, accessible via `self.path`
Notes
-----
This fixture will run as part of each test method defined in the
class and any subclasses, on account of the `autouse=True`
argument
"""
option_name = 'io.excel.{ext}.writer'.format(ext=ext.strip('.'))
prev_engine = get_option(option_name)
set_option(option_name, engine)
with ensure_clean(ext) as path:
self.path = path
yield
set_option(option_name, prev_engine) # Roll back option change
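# Illustrative note on the autouse fixture above (not part of the upstream
# test suite): a writer test in the classes below only needs to write to
# self.path and read it back, because the io.excel.<ext>.writer option set by
# the fixture already selects the parametrized engine, e.g.
# def test_sketch(self, merge_cells, engine, ext):
#     self.frame.to_excel(self.path, 'Sheet1')
#     tm.assert_frame_equal(read_excel(self.path, index_col=0), self.frame)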
@pytest.mark.parametrize("merge_cells", [True, False])
@pytest.mark.parametrize("engine,ext", [
pytest.param('openpyxl', '.xlsx', marks=pytest.mark.skipif(
not td.safe_import('openpyxl'), reason='No openpyxl')),
pytest.param('openpyxl', '.xlsm', marks=pytest.mark.skipif(
not td.safe_import('openpyxl'), reason='No openpyxl')),
pytest.param('xlwt', '.xls', marks=pytest.mark.skipif(
not td.safe_import('xlwt'), reason='No xlwt')),
pytest.param('xlsxwriter', '.xlsx', marks=pytest.mark.skipif(
not td.safe_import('xlsxwriter'), reason='No xlsxwriter'))
])
class TestExcelWriter(_WriterBase):
# Base class for test cases to run with different Excel writers.
def test_excel_sheet_by_name_raise(self, *_):
import xlrd
gt = DataFrame(np.random.randn(10, 2))
gt.to_excel(self.path)
xl = ExcelFile(self.path)
df = read_excel(xl, 0, index_col=0)
tm.assert_frame_equal(gt, df)
with pytest.raises(xlrd.XLRDError):
read_excel(xl, "0")
def test_excel_writer_context_manager(self, *_):
with ExcelWriter(self.path) as writer:
self.frame.to_excel(writer, "Data1")
self.frame2.to_excel(writer, "Data2")
with ExcelFile(self.path) as reader:
found_df = read_excel(reader, "Data1", index_col=0)
found_df2 = read_excel(reader, "Data2", index_col=0)
tm.assert_frame_equal(found_df, self.frame)
tm.assert_frame_equal(found_df2, self.frame2)
def test_roundtrip(self, merge_cells, engine, ext):
self.frame['A'][:5] = nan
self.frame.to_excel(self.path, 'test1')
self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
self.frame.to_excel(self.path, 'test1', header=False)
self.frame.to_excel(self.path, 'test1', index=False)
# test roundtrip
self.frame.to_excel(self.path, 'test1')
recons = read_excel(self.path, 'test1', index_col=0)
tm.assert_frame_equal(self.frame, recons)
self.frame.to_excel(self.path, 'test1', index=False)
recons = read_excel(self.path, 'test1', index_col=None)
recons.index = self.frame.index
tm.assert_frame_equal(self.frame, recons)
self.frame.to_excel(self.path, 'test1', na_rep='NA')
recons = read_excel(self.path, 'test1', index_col=0, na_values=['NA'])
tm.assert_frame_equal(self.frame, recons)
# GH 3611
self.frame.to_excel(self.path, 'test1', na_rep='88')
recons = read_excel(self.path, 'test1', index_col=0, na_values=['88'])
tm.assert_frame_equal(self.frame, recons)
self.frame.to_excel(self.path, 'test1', na_rep='88')
recons = read_excel(self.path, 'test1', index_col=0,
na_values=[88, 88.0])
tm.assert_frame_equal(self.frame, recons)
# GH 6573
self.frame.to_excel(self.path, 'Sheet1')
recons = read_excel(self.path, index_col=0)
tm.assert_frame_equal(self.frame, recons)
self.frame.to_excel(self.path, '0')
recons = read_excel(self.path, index_col=0)
tm.assert_frame_equal(self.frame, recons)
# GH 8825 Pandas Series should provide to_excel method
s = self.frame["A"]
s.to_excel(self.path)
recons = read_excel(self.path, index_col=0)
tm.assert_frame_equal(s.to_frame(), recons)
def test_mixed(self, merge_cells, engine, ext):
self.mixed_frame.to_excel(self.path, 'test1')
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1', index_col=0)
tm.assert_frame_equal(self.mixed_frame, recons)
def test_ts_frame(self, *_):
df = tm.makeTimeDataFrame()[:5]
df.to_excel(self.path, "test1")
reader = ExcelFile(self.path)
recons = read_excel(reader, "test1", index_col=0)
tm.assert_frame_equal(df, recons)
def test_basics_with_nan(self, merge_cells, engine, ext):
self.frame['A'][:5] = nan
self.frame.to_excel(self.path, 'test1')
self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
self.frame.to_excel(self.path, 'test1', header=False)
self.frame.to_excel(self.path, 'test1', index=False)
@pytest.mark.parametrize("np_type", [
np.int8, np.int16, np.int32, np.int64])
def test_int_types(self, merge_cells, engine, ext, np_type):
# Test np.int values read come back as int
# (rather than float which is Excel's format).
frame = DataFrame(np.random.randint(-10, 10, size=(10, 2)),
dtype=np_type)
frame.to_excel(self.path, "test1")
reader = ExcelFile(self.path)
recons = read_excel(reader, "test1", index_col=0)
int_frame = frame.astype(np.int64)
tm.assert_frame_equal(int_frame, recons)
recons2 = read_excel(self.path, "test1", index_col=0)
tm.assert_frame_equal(int_frame, recons2)
# With convert_float=False, the values come back as float.
float_frame = frame.astype(float)
recons = read_excel(self.path, "test1",
convert_float=False, index_col=0)
tm.assert_frame_equal(recons, float_frame,
check_index_type=False,
check_column_type=False)
@pytest.mark.parametrize("np_type", [
np.float16, np.float32, np.float64])
def test_float_types(self, merge_cells, engine, ext, np_type):
# Test np.float values read come back as float.
frame = DataFrame(np.random.random_sample(10), dtype=np_type)
frame.to_excel(self.path, "test1")
reader = ExcelFile(self.path)
recons = read_excel(reader, "test1", index_col=0).astype(np_type)
tm.assert_frame_equal(frame, recons, check_dtype=False)
@pytest.mark.parametrize("np_type", [np.bool8, np.bool_])
def test_bool_types(self, merge_cells, engine, ext, np_type):
# Test np.bool_ values survive an Excel round trip (cast back to np_type on read).
frame = (DataFrame([1, 0, True, False], dtype=np_type))
frame.to_excel(self.path, "test1")
reader = ExcelFile(self.path)
recons = read_excel(reader, "test1", index_col=0).astype(np_type)
tm.assert_frame_equal(frame, recons)
def test_inf_roundtrip(self, *_):
frame = DataFrame([(1, np.inf), (2, 3), (5, -np.inf)])
frame.to_excel(self.path, "test1")
reader = ExcelFile(self.path)
recons = read_excel(reader, "test1", index_col=0)
tm.assert_frame_equal(frame, recons)
def test_sheets(self, merge_cells, engine, ext):
self.frame['A'][:5] = nan
self.frame.to_excel(self.path, 'test1')
self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
self.frame.to_excel(self.path, 'test1', header=False)
self.frame.to_excel(self.path, 'test1', index=False)
# Test writing to separate sheets
writer = ExcelWriter(self.path)
self.frame.to_excel(writer, 'test1')
self.tsframe.to_excel(writer, 'test2')
writer.save()
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1', index_col=0)
tm.assert_frame_equal(self.frame, recons)
recons = read_excel(reader, 'test2', index_col=0)
tm.assert_frame_equal(self.tsframe, recons)
assert 2 == len(reader.sheet_names)
assert 'test1' == reader.sheet_names[0]
assert 'test2' == reader.sheet_names[1]
def test_colaliases(self, merge_cells, engine, ext):
self.frame['A'][:5] = nan
self.frame.to_excel(self.path, 'test1')
self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
self.frame.to_excel(self.path, 'test1', header=False)
self.frame.to_excel(self.path, 'test1', index=False)
# column aliases
col_aliases = Index(['AA', 'X', 'Y', 'Z'])
self.frame2.to_excel(self.path, 'test1', header=col_aliases)
reader = ExcelFile(self.path)
rs = read_excel(reader, 'test1', index_col=0)
xp = self.frame2.copy()
xp.columns = col_aliases
tm.assert_frame_equal(xp, rs)
def test_roundtrip_indexlabels(self, merge_cells, engine, ext):
self.frame['A'][:5] = nan
self.frame.to_excel(self.path, 'test1')
self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
self.frame.to_excel(self.path, 'test1', header=False)
self.frame.to_excel(self.path, 'test1', index=False)
# test index_label
frame = (DataFrame(np.random.randn(10, 2)) >= 0)
frame.to_excel(self.path, 'test1',
index_label=['test'],
merge_cells=merge_cells)
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1',
index_col=0,
).astype(np.int64)
frame.index.names = ['test']
assert frame.index.names == recons.index.names
frame = (DataFrame(np.random.randn(10, 2)) >= 0)
frame.to_excel(self.path,
'test1',
index_label=['test', 'dummy', 'dummy2'],
merge_cells=merge_cells)
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1',
index_col=0,
).astype(np.int64)
frame.index.names = ['test']
assert frame.index.names == recons.index.names
frame = (DataFrame(np.random.randn(10, 2)) >= 0)
frame.to_excel(self.path,
'test1',
index_label='test',
merge_cells=merge_cells)
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1',
index_col=0,
).astype(np.int64)
frame.index.names = ['test']
tm.assert_frame_equal(frame, recons.astype(bool))
self.frame.to_excel(self.path,
'test1',
columns=['A', 'B', 'C', 'D'],
index=False, merge_cells=merge_cells)
# take 'A' and 'B' as indexes (same row as cols 'C', 'D')
df = self.frame.copy()
df = df.set_index(['A', 'B'])
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1', index_col=[0, 1])
tm.assert_frame_equal(df, recons, check_less_precise=True)
def test_excel_roundtrip_indexname(self, merge_cells, engine, ext):
df = DataFrame(np.random.randn(10, 4))
df.index.name = 'foo'
df.to_excel(self.path, merge_cells=merge_cells)
xf = ExcelFile(self.path)
result = read_excel(xf, xf.sheet_names[0],
index_col=0)
tm.assert_frame_equal(result, df)
assert result.index.name == 'foo'
def test_excel_roundtrip_datetime(self, merge_cells, *_):
# datetime.date, not sure what to test here exactly
tsf = self.tsframe.copy()
tsf.index = [x.date() for x in self.tsframe.index]
tsf.to_excel(self.path, "test1", merge_cells=merge_cells)
reader = ExcelFile(self.path)
recons = read_excel(reader, "test1", index_col=0)
tm.assert_frame_equal(self.tsframe, recons)
def test_excel_date_datetime_format(self, merge_cells, engine, ext):
# see gh-4133
#
# Excel output format strings
df = DataFrame([[date(2014, 1, 31),
date(1999, 9, 24)],
[datetime(1998, 5, 26, 23, 33, 4),
datetime(2014, 2, 28, 13, 5, 13)]],
index=["DATE", "DATETIME"], columns=["X", "Y"])
df_expected = DataFrame([[datetime(2014, 1, 31),
datetime(1999, 9, 24)],
[datetime(1998, 5, 26, 23, 33, 4),
datetime(2014, 2, 28, 13, 5, 13)]],
index=["DATE", "DATETIME"], columns=["X", "Y"])
with ensure_clean(ext) as filename2:
writer1 = ExcelWriter(self.path)
writer2 = ExcelWriter(filename2,
date_format="DD.MM.YYYY",
datetime_format="DD.MM.YYYY HH-MM-SS")
df.to_excel(writer1, "test1")
df.to_excel(writer2, "test1")
writer1.close()
writer2.close()
reader1 = ExcelFile(self.path)
reader2 = ExcelFile(filename2)
rs1 = read_excel(reader1, "test1", index_col=0)
rs2 = read_excel(reader2, "test1", index_col=0)
tm.assert_frame_equal(rs1, rs2)
# Since the reader returns a datetime object for dates,
# we need to use df_expected to check the result.
tm.assert_frame_equal(rs2, df_expected)
def test_to_excel_interval_no_labels(self, *_):
# see gh-19242
#
# Test writing Interval without labels.
frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)),
dtype=np.int64)
expected = frame.copy()
frame["new"] = pd.cut(frame[0], 10)
expected["new"] = pd.cut(expected[0], 10).astype(str)
frame.to_excel(self.path, "test1")
reader = ExcelFile(self.path)
recons = read_excel(reader, "test1", index_col=0)
tm.assert_frame_equal(expected, recons)
def test_to_excel_interval_labels(self, *_):
# see gh-19242
#
# Test writing Interval with labels.
frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)),
dtype=np.int64)
expected = frame.copy()
intervals = pd.cut(frame[0], 10, labels=["A", "B", "C", "D", "E",
"F", "G", "H", "I", "J"])
frame["new"] = intervals
expected["new"] = pd.Series(list(intervals))
frame.to_excel(self.path, "test1")
reader = ExcelFile(self.path)
recons = read_excel(reader, "test1", index_col=0)
tm.assert_frame_equal(expected, recons)
def test_to_excel_timedelta(self, *_):
# see gh-19242, gh-9155
#
# Test writing timedelta to xls.
frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)),
columns=["A"], dtype=np.int64)
expected = frame.copy()
frame["new"] = frame["A"].apply(lambda x: timedelta(seconds=x))
expected["new"] = expected["A"].apply(
lambda x: timedelta(seconds=x).total_seconds() / float(86400))
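# Excel has no native timedelta type, so the expected values above are the
# timedeltas expressed as fractions of a day (86400 seconds per day).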
frame.to_excel(self.path, "test1")
reader = ExcelFile(self.path)
recons = read_excel(reader, "test1", index_col=0)
tm.assert_frame_equal(expected, recons)
def test_to_excel_periodindex(self, merge_cells, engine, ext):
frame = self.tsframe
xp = frame.resample('M', kind='period').mean()
xp.to_excel(self.path, 'sht1')
reader = ExcelFile(self.path)
rs = read_excel(reader, 'sht1', index_col=0)
tm.assert_frame_equal(xp, rs.to_period('M'))
def test_to_excel_multiindex(self, merge_cells, engine, ext):
frame = self.frame
arrays = np.arange(len(frame.index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays,
names=['first', 'second'])
frame.index = new_index
frame.to_excel(self.path, 'test1', header=False)
frame.to_excel(self.path, 'test1', columns=['A', 'B'])
# round trip
frame.to_excel(self.path, 'test1', merge_cells=merge_cells)
reader = ExcelFile(self.path)
df = read_excel(reader, 'test1', index_col=[0, 1])
tm.assert_frame_equal(frame, df)
# GH13511
def test_to_excel_multiindex_nan_label(self, merge_cells, engine, ext):
frame = pd.DataFrame({'A': [None, 2, 3],
'B': [10, 20, 30],
'C': np.random.sample(3)})
frame = frame.set_index(['A', 'B'])
frame.to_excel(self.path, merge_cells=merge_cells)
df = read_excel(self.path, index_col=[0, 1])
tm.assert_frame_equal(frame, df)
# Test for Issue 11328. If column indices are integers, make
# sure they are handled correctly for either setting of
# merge_cells
def test_to_excel_multiindex_cols(self, merge_cells, engine, ext):
frame = self.frame
arrays = np.arange(len(frame.index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays,
names=['first', 'second'])
frame.index = new_index
new_cols_index = MultiIndex.from_tuples([(40, 1), (40, 2),
(50, 1), (50, 2)])
frame.columns = new_cols_index
header = [0, 1]
if not merge_cells:
header = 0
# round trip
frame.to_excel(self.path, 'test1', merge_cells=merge_cells)
reader = ExcelFile(self.path)
df = read_excel(reader, 'test1', header=header,
index_col=[0, 1])
if not merge_cells:
fm = frame.columns.format(sparsify=False,
adjoin=False, names=False)
frame.columns = [".".join(map(str, q)) for q in zip(*fm)]
tm.assert_frame_equal(frame, df)
def test_to_excel_multiindex_dates(self, merge_cells, engine, ext):
# try multiindex with dates
tsframe = self.tsframe.copy()
new_index = [tsframe.index, np.arange(len(tsframe.index))]
tsframe.index = MultiIndex.from_arrays(new_index)
tsframe.index.names = ['time', 'foo']
tsframe.to_excel(self.path, 'test1', merge_cells=merge_cells)
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1',
index_col=[0, 1])
tm.assert_frame_equal(tsframe, recons)
assert recons.index.names == ('time', 'foo')
def test_to_excel_multiindex_no_write_index(self, merge_cells, engine,
ext):
# Test writing and re-reading a MI without the index. GH 5616.
# Initial non-MI frame.
frame1 = DataFrame({'a': [10, 20], 'b': [30, 40], 'c': [50, 60]})
# Add a MI.
frame2 = frame1.copy()
multi_index = MultiIndex.from_tuples([(70, 80), (90, 100)])
frame2.index = multi_index
# Write out to Excel without the index.
frame2.to_excel(self.path, 'test1', index=False)
# Read it back in.
reader = ExcelFile(self.path)
frame3 = read_excel(reader, 'test1')
# Test that it is the same as the initial frame.
tm.assert_frame_equal(frame1, frame3)
def test_to_excel_float_format(self, *_):
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=["A", "B"], columns=["X", "Y", "Z"])
df.to_excel(self.path, "test1", float_format="%.2f")
reader = ExcelFile(self.path)
result = read_excel(reader, "test1", index_col=0)
expected = DataFrame([[0.12, 0.23, 0.57],
[12.32, 123123.20, 321321.20]],
index=["A", "B"], columns=["X", "Y", "Z"])
tm.assert_frame_equal(result, expected)
def test_to_excel_output_encoding(self, merge_cells, engine, ext):
# Avoid mixed inferred_type.
df = DataFrame([[u"\u0192", u"\u0193", u"\u0194"],
[u"\u0195", u"\u0196", u"\u0197"]],
index=[u"A\u0192", u"B"],
columns=[u"X\u0193", u"Y", u"Z"])
with ensure_clean("__tmp_to_excel_float_format__." + ext) as filename:
df.to_excel(filename, sheet_name="TestSheet", encoding="utf8")
result = read_excel(filename, "TestSheet",
encoding="utf8", index_col=0)
tm.assert_frame_equal(result, df)
def test_to_excel_unicode_filename(self, merge_cells, engine, ext):
with ensure_clean(u("\u0192u.") + ext) as filename:
try:
f = open(filename, "wb")
except UnicodeEncodeError:
pytest.skip("No unicode file names on this system")
else:
f.close()
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=["A", "B"], columns=["X", "Y", "Z"])
df.to_excel(filename, "test1", float_format="%.2f")
reader = ExcelFile(filename)
result = read_excel(reader, "test1", index_col=0)
expected = DataFrame([[0.12, 0.23, 0.57],
[12.32, 123123.20, 321321.20]],
index=["A", "B"], columns=["X", "Y", "Z"])
tm.assert_frame_equal(result, expected)
# def test_to_excel_header_styling_xls(self, merge_cells, engine, ext):
# import StringIO
# s = StringIO(
# """Date,ticker,type,value
# 2001-01-01,x,close,12.2
# 2001-01-01,x,open ,12.1
# 2001-01-01,y,close,12.2
# 2001-01-01,y,open ,12.1
# 2001-02-01,x,close,12.2
# 2001-02-01,x,open ,12.1
# 2001-02-01,y,close,12.2
# 2001-02-01,y,open ,12.1
# 2001-03-01,x,close,12.2
# 2001-03-01,x,open ,12.1
# 2001-03-01,y,close,12.2
# 2001-03-01,y,open ,12.1""")
# df = read_csv(s, parse_dates=["Date"])
# pdf = df.pivot_table(values="value", rows=["ticker"],
# cols=["Date", "type"])
# try:
# import xlwt
# import xlrd
# except ImportError:
# pytest.skip
# filename = '__tmp_to_excel_header_styling_xls__.xls'
# pdf.to_excel(filename, 'test1')
# wbk = xlrd.open_workbook(filename,
# formatting_info=True)
# assert ["test1"] == wbk.sheet_names()
# ws = wbk.sheet_by_name('test1')
# assert [(0, 1, 5, 7), (0, 1, 3, 5), (0, 1, 1, 3)] == ws.merged_cells
# for i in range(0, 2):
# for j in range(0, 7):
# xfx = ws.cell_xf_index(0, 0)
# cell_xf = wbk.xf_list[xfx]
# font = wbk.font_list
# assert 1 == font[cell_xf.font_index].bold
# assert 1 == cell_xf.border.top_line_style
# assert 1 == cell_xf.border.right_line_style
# assert 1 == cell_xf.border.bottom_line_style
# assert 1 == cell_xf.border.left_line_style
# assert 2 == cell_xf.alignment.hor_align
# os.remove(filename)
# def test_to_excel_header_styling_xlsx(self, merge_cells, engine, ext):
# import StringIO
# s = StringIO(
# """Date,ticker,type,value
# 2001-01-01,x,close,12.2
# 2001-01-01,x,open ,12.1
# 2001-01-01,y,close,12.2
# 2001-01-01,y,open ,12.1
# 2001-02-01,x,close,12.2
# 2001-02-01,x,open ,12.1
# 2001-02-01,y,close,12.2
# 2001-02-01,y,open ,12.1
# 2001-03-01,x,close,12.2
# 2001-03-01,x,open ,12.1
# 2001-03-01,y,close,12.2
# 2001-03-01,y,open ,12.1""")
# df = read_csv(s, parse_dates=["Date"])
# pdf = df.pivot_table(values="value", rows=["ticker"],
# cols=["Date", "type"])
# try:
# import openpyxl
# from openpyxl.cell import get_column_letter
# except ImportError:
# pytest.skip
# if openpyxl.__version__ < '1.6.1':
# pytest.skip
# # test xlsx_styling
# filename = '__tmp_to_excel_header_styling_xlsx__.xlsx'
# pdf.to_excel(filename, 'test1')
# wbk = openpyxl.load_workbook(filename)
# assert ["test1"] == wbk.get_sheet_names()
# ws = wbk.get_sheet_by_name('test1')
# xlsaddrs = ["%s2" % chr(i) for i in range(ord('A'), ord('H'))]
# xlsaddrs += ["A%s" % i for i in range(1, 6)]
# xlsaddrs += ["B1", "D1", "F1"]
# for xlsaddr in xlsaddrs:
# cell = ws.cell(xlsaddr)
# assert cell.style.font.bold
# assert (openpyxl.style.Border.BORDER_THIN ==
# cell.style.borders.top.border_style)
# assert (openpyxl.style.Border.BORDER_THIN ==
# cell.style.borders.right.border_style)
# assert (openpyxl.style.Border.BORDER_THIN ==
# cell.style.borders.bottom.border_style)
# assert (openpyxl.style.Border.BORDER_THIN ==
# cell.style.borders.left.border_style)
# assert (openpyxl.style.Alignment.HORIZONTAL_CENTER ==
# cell.style.alignment.horizontal)
# mergedcells_addrs = ["C1", "E1", "G1"]
# for maddr in mergedcells_addrs:
# assert ws.cell(maddr).merged
# os.remove(filename)
@pytest.mark.parametrize("use_headers", [True, False])
@pytest.mark.parametrize("r_idx_nlevels", [1, 2, 3])
@pytest.mark.parametrize("c_idx_nlevels", [1, 2, 3])
def test_excel_010_hemstring(self, merge_cells, engine, ext,
c_idx_nlevels, r_idx_nlevels, use_headers):
def roundtrip(data, header=True, parser_hdr=0, index=True):
data.to_excel(self.path, header=header,
merge_cells=merge_cells, index=index)
xf = ExcelFile(self.path)
return read_excel(xf, xf.sheet_names[0], header=parser_hdr)
# Basic test.
parser_header = 0 if use_headers else None
res = roundtrip(DataFrame([0]), use_headers, parser_header)
assert res.shape == (1, 2)
assert res.iloc[0, 0] is not np.nan
# More complex tests with multi-index.
nrows = 5
ncols = 3
from pandas.util.testing import makeCustomDataframe as mkdf
# ensure limited functionality in 0.10
# override of gh-2370 until sorted out in 0.11
df = mkdf(nrows, ncols, r_idx_nlevels=r_idx_nlevels,
c_idx_nlevels=c_idx_nlevels)
# This if will be removed once multi-column Excel writing
# is implemented. For now fixing gh-9794.
if c_idx_nlevels > 1:
with pytest.raises(NotImplementedError):
roundtrip(df, use_headers, index=False)
else:
res = roundtrip(df, use_headers)
if use_headers:
assert res.shape == (nrows, ncols + r_idx_nlevels)
else:
# First row taken as columns.
assert res.shape == (nrows - 1, ncols + r_idx_nlevels)
# No NaNs.
for r in range(len(res.index)):
for c in range(len(res.columns)):
assert res.iloc[r, c] is not np.nan
def test_duplicated_columns(self, *_):
# see gh-5235
write_frame = DataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
col_names = ["A", "B", "B"]
write_frame.columns = col_names
write_frame.to_excel(self.path, "test1")
read_frame = read_excel(self.path, "test1", index_col=0)
read_frame.columns = col_names
tm.assert_frame_equal(write_frame, read_frame)
# see gh-11007, gh-10970
write_frame = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]],
columns=["A", "B", "A", "B"])
write_frame.to_excel(self.path, "test1")
read_frame = read_excel(self.path, "test1", index_col=0)
read_frame.columns = ["A", "B", "A", "B"]
tm.assert_frame_equal(write_frame, read_frame)
# see gh-10982
write_frame.to_excel(self.path, "test1", index=False, header=False)
read_frame = read_excel(self.path, "test1", header=None)
write_frame.columns = [0, 1, 2, 3]
tm.assert_frame_equal(write_frame, read_frame)
def test_swapped_columns(self, merge_cells, engine, ext):
# Test for issue #5427.
write_frame = DataFrame({'A': [1, 1, 1],
'B': [2, 2, 2]})
write_frame.to_excel(self.path, 'test1', columns=['B', 'A'])
read_frame = read_excel(self.path, 'test1', header=0)
tm.assert_series_equal(write_frame['A'], read_frame['A'])
tm.assert_series_equal(write_frame['B'], read_frame['B'])
def test_invalid_columns(self, *_):
# see gh-10982
write_frame = DataFrame({"A": [1, 1, 1],
"B": [2, 2, 2]})
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
write_frame.to_excel(self.path, "test1", columns=["B", "C"])
expected = write_frame.reindex(columns=["B", "C"])
read_frame = read_excel(self.path, "test1", index_col=0)
tm.assert_frame_equal(expected, read_frame)
with pytest.raises(KeyError):
write_frame.to_excel(self.path, "test1", columns=["C", "D"])
def test_comment_arg(self, *_):
# see gh-18735
#
# Test the comment argument functionality to read_excel.
# Create file to read in.
df = DataFrame({"A": ["one", "#one", "one"],
"B": ["two", "two", "#two"]})
df.to_excel(self.path, "test_c")
# Read file without comment arg.
result1 = read_excel(self.path, "test_c", index_col=0)
result1.iloc[1, 0] = None
result1.iloc[1, 1] = None
result1.iloc[2, 1] = None
result2 = read_excel(self.path, "test_c", comment="#", index_col=0)
tm.assert_frame_equal(result1, result2)
def test_comment_default(self, merge_cells, engine, ext):
# Re issue #18735
# Test the comment argument default to read_excel
# Create file to read in
df = DataFrame({'A': ['one', '#one', 'one'],
'B': ['two', 'two', '#two']})
df.to_excel(self.path, 'test_c')
# Read file with default and explicit comment=None
result1 = read_excel(self.path, 'test_c')
result2 = read_excel(self.path, 'test_c', comment=None)
tm.assert_frame_equal(result1, result2)
def test_comment_used(self, *_):
# see gh-18735
#
# Test the comment argument is working as expected when used.
# Create file to read in.
df = DataFrame({"A": ["one", "#one", "one"],
"B": ["two", "two", "#two"]})
df.to_excel(self.path, "test_c")
# Test read_frame_comment against manually produced expected output.
expected = DataFrame({"A": ["one", None, "one"],
"B": ["two", None, None]})
result = read_excel(self.path, "test_c", comment="#", index_col=0)
tm.assert_frame_equal(result, expected)
def test_comment_empty_line(self, merge_cells, engine, ext):
# Re issue #18735
# Test that read_excel ignores commented lines at the end of file
df = DataFrame({'a': ['1', '#2'], 'b': ['2', '3']})
df.to_excel(self.path, index=False)
# Test that all-comment lines at EoF are ignored
expected = DataFrame({'a': [1], 'b': [2]})
result = read_excel(self.path, comment='#')
tm.assert_frame_equal(result, expected)
def test_datetimes(self, merge_cells, engine, ext):
# Test writing and reading datetimes. For issue #9139. (xref #9185)
datetimes = [datetime(2013, 1, 13, 1, 2, 3),
datetime(2013, 1, 13, 2, 45, 56),
datetime(2013, 1, 13, 4, 29, 49),
datetime(2013, 1, 13, 6, 13, 42),
datetime(2013, 1, 13, 7, 57, 35),
datetime(2013, 1, 13, 9, 41, 28),
datetime(2013, 1, 13, 11, 25, 21),
datetime(2013, 1, 13, 13, 9, 14),
datetime(2013, 1, 13, 14, 53, 7),
datetime(2013, 1, 13, 16, 37, 0),
datetime(2013, 1, 13, 18, 20, 52)]
write_frame = DataFrame({'A': datetimes})
write_frame.to_excel(self.path, 'Sheet1')
read_frame = read_excel(self.path, 'Sheet1', header=0)
tm.assert_series_equal(write_frame['A'], read_frame['A'])
def test_bytes_io(self, merge_cells, engine, ext):
# see gh-7074
bio = BytesIO()
df = DataFrame(np.random.randn(10, 2))
# Pass engine explicitly, as there is no file path to infer from.
writer = ExcelWriter(bio, engine=engine)
df.to_excel(writer)
writer.save()
bio.seek(0)
reread_df = read_excel(bio, index_col=0)
tm.assert_frame_equal(df, reread_df)
def test_write_lists_dict(self, *_):
# see gh-8188.
df = DataFrame({"mixed": ["a", ["b", "c"], {"d": "e", "f": 2}],
"numeric": [1, 2, 3.0],
"str": ["apple", "banana", "cherry"]})
df.to_excel(self.path, "Sheet1")
read = read_excel(self.path, "Sheet1", header=0, index_col=0)
expected = df.copy()
expected.mixed = expected.mixed.apply(str)
expected.numeric = expected.numeric.astype("int64")
tm.assert_frame_equal(read, expected)
def test_true_and_false_value_options(self, *_):
# see gh-13347
df = pd.DataFrame([["foo", "bar"]], columns=["col1", "col2"])
expected = df.replace({"foo": True, "bar": False})
df.to_excel(self.path)
read_frame = read_excel(self.path, true_values=["foo"],
false_values=["bar"], index_col=0)
tm.assert_frame_equal(read_frame, expected)
def test_freeze_panes(self, *_):
# see gh-15160
expected = DataFrame([[1, 2], [3, 4]], columns=["col1", "col2"])
expected.to_excel(self.path, "Sheet1", freeze_panes=(1, 1))
result = read_excel(self.path, index_col=0)
tm.assert_frame_equal(result, expected)
def test_path_path_lib(self, merge_cells, engine, ext):
df = tm.makeDataFrame()
writer = partial(df.to_excel, engine=engine)
reader = partial(pd.read_excel, index_col=0)
result = tm.round_trip_pathlib(writer, reader,
path="foo.{ext}".format(ext=ext))
tm.assert_frame_equal(result, df)
def test_path_local_path(self, merge_cells, engine, ext):
df = tm.makeDataFrame()
writer = partial(df.to_excel, engine=engine)
reader = partial(pd.read_excel, index_col=0)
result = tm.round_trip_pathlib(writer, reader,
path="foo.{ext}".format(ext=ext))
tm.assert_frame_equal(result, df)
@td.skip_if_no('openpyxl')
@pytest.mark.parametrize("merge_cells,ext,engine", [
(None, '.xlsx', 'openpyxl')])
class TestOpenpyxlTests(_WriterBase):
def test_to_excel_styleconverter(self, merge_cells, ext, engine):
from openpyxl import styles
hstyle = {
"font": {
"color": '00FF0000',
"bold": True,
},
"borders": {
"top": "thin",
"right": "thin",
"bottom": "thin",
"left": "thin",
},
"alignment": {
"horizontal": "center",
"vertical": "top",
},
"fill": {
"patternType": 'solid',
'fgColor': {
'rgb': '006666FF',
'tint': 0.3,
},
},
"number_format": {
"format_code": "0.00"
},
"protection": {
"locked": True,
"hidden": False,
},
}
font_color = styles.Color('00FF0000')
font = styles.Font(bold=True, color=font_color)
side = styles.Side(style=styles.borders.BORDER_THIN)
border = styles.Border(top=side, right=side, bottom=side, left=side)
alignment = styles.Alignment(horizontal='center', vertical='top')
fill_color = styles.Color(rgb='006666FF', tint=0.3)
fill = styles.PatternFill(patternType='solid', fgColor=fill_color)
number_format = '0.00'
protection = styles.Protection(locked=True, hidden=False)
kw = _OpenpyxlWriter._convert_to_style_kwargs(hstyle)
assert kw['font'] == font
assert kw['border'] == border
assert kw['alignment'] == alignment
assert kw['fill'] == fill
assert kw['number_format'] == number_format
assert kw['protection'] == protection
def test_write_cells_merge_styled(self, merge_cells, ext, engine):
from pandas.io.formats.excel import ExcelCell
sheet_name = 'merge_styled'
sty_b1 = {'font': {'color': '00FF0000'}}
sty_a2 = {'font': {'color': '0000FF00'}}
initial_cells = [
ExcelCell(col=1, row=0, val=42, style=sty_b1),
ExcelCell(col=0, row=1, val=99, style=sty_a2),
]
sty_merged = {'font': {'color': '000000FF', 'bold': True}}
sty_kwargs = _OpenpyxlWriter._convert_to_style_kwargs(sty_merged)
openpyxl_sty_merged = sty_kwargs['font']
merge_cells = [
ExcelCell(col=0, row=0, val='pandas',
mergestart=1, mergeend=1, style=sty_merged),
]
with ensure_clean(ext) as path:
writer = _OpenpyxlWriter(path)
writer.write_cells(initial_cells, sheet_name=sheet_name)
writer.write_cells(merge_cells, sheet_name=sheet_name)
wks = writer.sheets[sheet_name]
xcell_b1 = wks['B1']
xcell_a2 = wks['A2']
assert xcell_b1.font == openpyxl_sty_merged
assert xcell_a2.font == openpyxl_sty_merged
@pytest.mark.parametrize("mode,expected", [
('w', ['baz']), ('a', ['foo', 'bar', 'baz'])])
def test_write_append_mode(self, merge_cells, ext, engine, mode, expected):
import openpyxl
df = DataFrame([1], columns=['baz'])
with ensure_clean(ext) as f:
wb = openpyxl.Workbook()
wb.worksheets[0].title = 'foo'
wb.worksheets[0]['A1'].value = 'foo'
wb.create_sheet('bar')
wb.worksheets[1]['A1'].value = 'bar'
wb.save(f)
writer = ExcelWriter(f, engine=engine, mode=mode)
df.to_excel(writer, sheet_name='baz', index=False)
writer.save()
wb2 = openpyxl.load_workbook(f)
result = [sheet.title for sheet in wb2.worksheets]
assert result == expected
for index, cell_value in enumerate(expected):
assert wb2.worksheets[index]['A1'].value == cell_value
@td.skip_if_no('xlwt')
@pytest.mark.parametrize("merge_cells,ext,engine", [
(None, '.xls', 'xlwt')])
class TestXlwtTests(_WriterBase):
def test_excel_raise_error_on_multiindex_columns_and_no_index(
self, merge_cells, ext, engine):
# MultiIndex as columns is not yet implemented 9794
cols = MultiIndex.from_tuples([('site', ''),
('2014', 'height'),
('2014', 'weight')])
df = DataFrame(np.random.randn(10, 3), columns=cols)
with pytest.raises(NotImplementedError):
with ensure_clean(ext) as path:
df.to_excel(path, index=False)
def test_excel_multiindex_columns_and_index_true(self, merge_cells, ext,
engine):
cols = MultiIndex.from_tuples([('site', ''),
('2014', 'height'),
('2014', 'weight')])
df = pd.DataFrame(np.random.randn(10, 3), columns=cols)
with ensure_clean(ext) as path:
df.to_excel(path, index=True)
def test_excel_multiindex_index(self, merge_cells, ext, engine):
# MultiIndex as index works so assert no error #9794
cols = MultiIndex.from_tuples([('site', ''),
('2014', 'height'),
('2014', 'weight')])
df = DataFrame(np.random.randn(3, 10), index=cols)
with ensure_clean(ext) as path:
df.to_excel(path, index=False)
def test_to_excel_styleconverter(self, merge_cells, ext, engine):
import xlwt
hstyle = {"font": {"bold": True},
"borders": {"top": "thin",
"right": "thin",
"bottom": "thin",
"left": "thin"},
"alignment": {"horizontal": "center", "vertical": "top"}}
xls_style = _XlwtWriter._convert_to_style(hstyle)
assert xls_style.font.bold
assert xlwt.Borders.THIN == xls_style.borders.top
assert xlwt.Borders.THIN == xls_style.borders.right
assert xlwt.Borders.THIN == xls_style.borders.bottom
assert xlwt.Borders.THIN == xls_style.borders.left
assert xlwt.Alignment.HORZ_CENTER == xls_style.alignment.horz
assert xlwt.Alignment.VERT_TOP == xls_style.alignment.vert
def test_write_append_mode_raises(self, merge_cells, ext, engine):
msg = "Append mode is not supported with xlwt!"
with ensure_clean(ext) as f:
with pytest.raises(ValueError, match=msg):
ExcelWriter(f, engine=engine, mode='a')
@td.skip_if_no('xlsxwriter')
@pytest.mark.parametrize("merge_cells,ext,engine", [
(None, '.xlsx', 'xlsxwriter')])
class TestXlsxWriterTests(_WriterBase):
@td.skip_if_no('openpyxl')
def test_column_format(self, merge_cells, ext, engine):
# Test that column formats are applied to cells. Test for issue #9167.
# Applicable to xlsxwriter only.
with warnings.catch_warnings():
# Ignore the openpyxl lxml warning.
warnings.simplefilter("ignore")
import openpyxl
with ensure_clean(ext) as path:
frame = DataFrame({'A': [123456, 123456],
'B': [123456, 123456]})
writer = ExcelWriter(path)
frame.to_excel(writer)
# Add a number format to col B and ensure it is applied to cells.
num_format = '#,##0'
write_workbook = writer.book
write_worksheet = write_workbook.worksheets()[0]
col_format = write_workbook.add_format({'num_format': num_format})
write_worksheet.set_column('B:B', None, col_format)
writer.save()
read_workbook = openpyxl.load_workbook(path)
try:
read_worksheet = read_workbook['Sheet1']
except TypeError:
# compat
read_worksheet = read_workbook.get_sheet_by_name(name='Sheet1')
# Get the number format from the cell.
try:
cell = read_worksheet['B2']
except TypeError:
# compat
cell = read_worksheet.cell('B2')
try:
read_num_format = cell.number_format
except Exception:
read_num_format = cell.style.number_format._format_code
assert read_num_format == num_format
def test_write_append_mode_raises(self, merge_cells, ext, engine):
msg = "Append mode is not supported with xlsxwriter!"
with ensure_clean(ext) as f:
with pytest.raises(ValueError, match=msg):
ExcelWriter(f, engine=engine, mode='a')
class TestExcelWriterEngineTests(object):
@pytest.mark.parametrize('klass,ext', [
pytest.param(_XlsxWriter, '.xlsx', marks=pytest.mark.skipif(
not td.safe_import('xlsxwriter'), reason='No xlsxwriter')),
pytest.param(_OpenpyxlWriter, '.xlsx', marks=pytest.mark.skipif(
not td.safe_import('openpyxl'), reason='No openpyxl')),
pytest.param(_XlwtWriter, '.xls', marks=pytest.mark.skipif(
not td.safe_import('xlwt'), reason='No xlwt'))
])
def test_ExcelWriter_dispatch(self, klass, ext):
with ensure_clean(ext) as path:
writer = ExcelWriter(path)
if ext == '.xlsx' and td.safe_import('xlsxwriter'):
# xlsxwriter has preference over openpyxl if both installed
assert isinstance(writer, _XlsxWriter)
else:
assert isinstance(writer, klass)
def test_ExcelWriter_dispatch_raises(self):
with pytest.raises(ValueError, match='No engine'):
ExcelWriter('nothing')
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
def test_register_writer(self):
# some awkward mocking to test out dispatch and such actually works
called_save = []
called_write_cells = []
class DummyClass(ExcelWriter):
called_save = False
called_write_cells = False
supported_extensions = ['test', 'xlsx', 'xls']
engine = 'dummy'
def save(self):
called_save.append(True)
def write_cells(self, *args, **kwargs):
called_write_cells.append(True)
def check_called(func):
func()
assert len(called_save) >= 1
assert len(called_write_cells) >= 1
del called_save[:]
del called_write_cells[:]
with pd.option_context('io.excel.xlsx.writer', 'dummy'):
register_writer(DummyClass)
writer = ExcelWriter('something.test')
assert isinstance(writer, DummyClass)
df = tm.makeCustomDataframe(1, 1)
with catch_warnings(record=True):
panel = tm.makePanel()
func = lambda: df.to_excel('something.test')
check_called(func)
check_called(lambda: panel.to_excel('something.test'))
check_called(lambda: df.to_excel('something.xlsx'))
check_called(
lambda: df.to_excel(
'something.xls', engine='dummy'))
@pytest.mark.parametrize('engine', [
pytest.param('xlwt',
marks=pytest.mark.xfail(reason='xlwt does not support '
'openpyxl-compatible '
'style dicts',
strict=True)),
'xlsxwriter',
'openpyxl',
])
def test_styler_to_excel(engine):
def style(df):
# XXX: RGB colors not supported in xlwt
return DataFrame([['font-weight: bold', '', ''],
['', 'color: blue', ''],
['', '', 'text-decoration: underline'],
['border-style: solid', '', ''],
['', 'font-style: italic', ''],
['', '', 'text-align: right'],
['background-color: red', '', ''],
['number-format: 0%', '', ''],
['', '', ''],
['', '', ''],
['', '', '']],
index=df.index, columns=df.columns)
def assert_equal_style(cell1, cell2):
# XXX: should find a better way to check equality
assert cell1.alignment.__dict__ == cell2.alignment.__dict__
assert cell1.border.__dict__ == cell2.border.__dict__
assert cell1.fill.__dict__ == cell2.fill.__dict__
assert cell1.font.__dict__ == cell2.font.__dict__
assert cell1.number_format == cell2.number_format
assert cell1.protection.__dict__ == cell2.protection.__dict__
def custom_converter(css):
# use bold iff there is custom style attached to the cell
if css.strip(' \n;'):
return {'font': {'bold': True}}
return {}
pytest.importorskip('jinja2')
pytest.importorskip(engine)
# Prepare spreadsheets
df = DataFrame(np.random.randn(11, 3))
with ensure_clean('.xlsx' if engine != 'xlwt' else '.xls') as path:
writer = ExcelWriter(path, engine=engine)
df.to_excel(writer, sheet_name='frame')
df.style.to_excel(writer, sheet_name='unstyled')
styled = df.style.apply(style, axis=None)
styled.to_excel(writer, sheet_name='styled')
ExcelFormatter(styled, style_converter=custom_converter).write(
writer, sheet_name='custom')
writer.save()
if engine not in ('openpyxl', 'xlsxwriter'):
# For other engines, we only smoke test
return
openpyxl = pytest.importorskip('openpyxl')
wb = openpyxl.load_workbook(path)
# (1) compare DataFrame.to_excel and Styler.to_excel when unstyled
n_cells = 0
for col1, col2 in zip(wb['frame'].columns,
wb['unstyled'].columns):
assert len(col1) == len(col2)
for cell1, cell2 in zip(col1, col2):
assert cell1.value == cell2.value
assert_equal_style(cell1, cell2)
n_cells += 1
# ensure iteration actually happened:
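# ((11 data rows + 1 header row) x (3 data columns + 1 index column))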
assert n_cells == (11 + 1) * (3 + 1)
# (2) check styling with default converter
# XXX: openpyxl (as at 2.4) prefixes colors with 00, xlsxwriter with FF
alpha = '00' if engine == 'openpyxl' else 'FF'
n_cells = 0
for col1, col2 in zip(wb['frame'].columns,
wb['styled'].columns):
assert len(col1) == len(col2)
for cell1, cell2 in zip(col1, col2):
ref = '%s%d' % (cell2.column, cell2.row)
# XXX: this isn't as strong a test as ideal; we should
# confirm that differences are exclusive
if ref == 'B2':
assert not cell1.font.bold
assert cell2.font.bold
elif ref == 'C3':
assert cell1.font.color.rgb != cell2.font.color.rgb
assert cell2.font.color.rgb == alpha + '0000FF'
elif ref == 'D4':
# This fails with engine=xlsxwriter due to
# https://bitbucket.org/openpyxl/openpyxl/issues/800
if engine == 'xlsxwriter' \
and (LooseVersion(openpyxl.__version__) <
LooseVersion('2.4.6')):
pass
else:
assert cell1.font.underline != cell2.font.underline
assert cell2.font.underline == 'single'
elif ref == 'B5':
assert not cell1.border.left.style
assert (cell2.border.top.style ==
cell2.border.right.style ==
cell2.border.bottom.style ==
cell2.border.left.style ==
'medium')
elif ref == 'C6':
assert not cell1.font.italic
assert cell2.font.italic
elif ref == 'D7':
assert (cell1.alignment.horizontal !=
cell2.alignment.horizontal)
assert cell2.alignment.horizontal == 'right'
elif ref == 'B8':
assert cell1.fill.fgColor.rgb != cell2.fill.fgColor.rgb
assert cell1.fill.patternType != cell2.fill.patternType
assert cell2.fill.fgColor.rgb == alpha + 'FF0000'
assert cell2.fill.patternType == 'solid'
elif ref == 'B9':
assert cell1.number_format == 'General'
assert cell2.number_format == '0%'
else:
assert_equal_style(cell1, cell2)
assert cell1.value == cell2.value
n_cells += 1
assert n_cells == (11 + 1) * (3 + 1)
# (3) check styling with custom converter
n_cells = 0
for col1, col2 in zip(wb['frame'].columns,
wb['custom'].columns):
assert len(col1) == len(col2)
for cell1, cell2 in zip(col1, col2):
ref = '%s%d' % (cell2.column, cell2.row)
if ref in ('B2', 'C3', 'D4', 'B5', 'C6', 'D7', 'B8', 'B9'):
assert not cell1.font.bold
assert cell2.font.bold
else:
assert_equal_style(cell1, cell2)
assert cell1.value == cell2.value
n_cells += 1
assert n_cells == (11 + 1) * (3 + 1)
@td.skip_if_no('openpyxl')
@pytest.mark.skipif(not PY36, reason='requires fspath')
class TestFSPath(object):
def test_excelfile_fspath(self):
with tm.ensure_clean('foo.xlsx') as path:
df = DataFrame({"A": [1, 2]})
df.to_excel(path)
xl = ExcelFile(path)
result = os.fspath(xl)
assert result == path
def test_excelwriter_fspath(self):
with tm.ensure_clean('foo.xlsx') as path:
writer = ExcelWriter(path)
assert os.fspath(writer) == str(path)
| bsd-3-clause |
roxyboy/scikit-learn | examples/calibration/plot_calibration.py | 225 | 4795 | """
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you some kind of confidence on the prediction. However, not all
classifiers provide well-calibrated probabilities: some are over-confident
while others are under-confident. Thus, a separate calibration of predicted
probabilities is often desirable as a postprocessing step. This example illustrates
two different methods for this calibration and evaluates the quality of the
returned probabilities using Brier's score
(see http://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probability using a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is able
to provide a probability calibration that returns probabilities close to the
expected 0.5 for most of the samples belonging to the middle cluster with
heterogeneous labels. This results in a significantly improved Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <[email protected]>
# Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.cross_validation import train_test_split
n_samples = 50000
n_bins = 3 # use 3 bins for calibration_curve as we have 3 clusters here
# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
centers=centers, shuffle=False, random_state=42)
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])
# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)
# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train) # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]
print("Brier scores: (the smaller the better)")
clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)
clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)
clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)
###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
this_X = X_train[y_train == this_y]
this_sw = sw_train[y_train == this_y]
plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")
plt.figure()
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
y_test[order].reshape(25, -1).mean(1),
'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
"(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
| bsd-3-clause |
dopplershift/MetPy | examples/meteogram_metpy.py | 2 | 8861 | # Copyright (c) 2017 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Meteogram
=========
Plots time series data as a meteogram.
"""
import datetime as dt
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from metpy.calc import dewpoint_from_relative_humidity
from metpy.cbook import get_test_data
from metpy.plots import add_metpy_logo
from metpy.units import units
def calc_mslp(t, p, h):
return p * (1 - (0.0065 * h) / (t + 0.0065 * h + 273.15)) ** (-5.257)
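# calc_mslp above is a common barometric-formula reduction of station pressure
# to sea level: p0 = p * (1 - 0.0065*h / (t + 0.0065*h + 273.15))**(-5.257),
# assuming t in degC, p in hPa and the station height h in metres.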
# Make meteogram plot
class Meteogram:
""" Plot a time series of meteorological data from a particular station as a
meteogram with standard variables to visualize, including thermodynamic,
kinematic, and pressure. The functions below control the plotting of each
variable.
TO DO: Make the subplot creation dynamic so the number of rows is not
static as it is currently. """
def __init__(self, fig, dates, probeid, time=None, axis=0):
"""
Required input:
fig: figure object
dates: array of dates corresponding to the data
probeid: ID of the station
Optional Input:
time: Time the data is to be plotted
axis: number that controls the new axis to be plotted (FOR FUTURE)
"""
if not time:
time = dt.datetime.utcnow()
self.start = dates[0]
self.fig = fig
self.end = dates[-1]
self.axis_num = 0
self.dates = mpl.dates.date2num(dates)
self.time = time.strftime('%Y-%m-%d %H:%M UTC')
self.title = f'Latest Ob Time: {self.time}\nProbe ID: {probeid}'
def plot_winds(self, ws, wd, wsmax, plot_range=None):
"""
Required input:
ws: Wind speeds (knots)
wd: Wind direction (degrees)
wsmax: Wind gust (knots)
Optional Input:
plot_range: Data range for making figure (list of (min,max,step))
"""
# PLOT WIND SPEED AND WIND DIRECTION
self.ax1 = fig.add_subplot(4, 1, 1)
ln1 = self.ax1.plot(self.dates, ws, label='Wind Speed')
self.ax1.fill_between(self.dates, ws, 0)
self.ax1.set_xlim(self.start, self.end)
if not plot_range:
plot_range = [0, 20, 1]
self.ax1.set_ylabel('Wind Speed (knots)', multialignment='center')
self.ax1.set_ylim(plot_range[0], plot_range[1], plot_range[2])
self.ax1.grid(b=True, which='major', axis='y', color='k', linestyle='--',
linewidth=0.5)
ln2 = self.ax1.plot(self.dates, wsmax, '.r', label='3-sec Wind Speed Max')
ax7 = self.ax1.twinx()
ln3 = ax7.plot(self.dates, wd, '.k', linewidth=0.5, label='Wind Direction')
ax7.set_ylabel('Wind\nDirection\n(degrees)', multialignment='center')
ax7.set_ylim(0, 360)
ax7.set_yticks(np.arange(45, 405, 90))
ax7.set_yticklabels(['NE', 'SE', 'SW', 'NW'])
lines = ln1 + ln2 + ln3
labs = [line.get_label() for line in lines]
ax7.xaxis.set_major_formatter(mpl.dates.DateFormatter('%d/%H UTC'))
ax7.legend(lines, labs, loc='upper center',
bbox_to_anchor=(0.5, 1.2), ncol=3, prop={'size': 12})
def plot_thermo(self, t, td, plot_range=None):
"""
Required input:
t: Temperature (deg F)
td: Dewpoint (deg F)
Optional Input:
plot_range: Data range for making figure (list of (min,max,step))
"""
# PLOT TEMPERATURE AND DEWPOINT
if not plot_range:
plot_range = [10, 90, 2]
self.ax2 = fig.add_subplot(4, 1, 2, sharex=self.ax1)
ln4 = self.ax2.plot(self.dates, t, 'r-', label='Temperature')
self.ax2.fill_between(self.dates, t, td, color='r')
self.ax2.set_ylabel('Temperature\n(F)', multialignment='center')
self.ax2.grid(b=True, which='major', axis='y', color='k', linestyle='--',
linewidth=0.5)
self.ax2.set_ylim(plot_range[0], plot_range[1], plot_range[2])
ln5 = self.ax2.plot(self.dates, td, 'g-', label='Dewpoint')
self.ax2.fill_between(self.dates, td, self.ax2.get_ylim()[0], color='g')
ax_twin = self.ax2.twinx()
ax_twin.set_ylim(plot_range[0], plot_range[1], plot_range[2])
lines = ln4 + ln5
labs = [line.get_label() for line in lines]
ax_twin.xaxis.set_major_formatter(mpl.dates.DateFormatter('%d/%H UTC'))
self.ax2.legend(lines, labs, loc='upper center',
bbox_to_anchor=(0.5, 1.2), ncol=2, prop={'size': 12})
def plot_rh(self, rh, plot_range=None):
"""
Required input:
rh: Relative humidity (%)
Optional Input:
plot_range: Data range for making figure (list of (min,max,step))
"""
# PLOT RELATIVE HUMIDITY
if not plot_range:
plot_range = [0, 100, 4]
self.ax3 = fig.add_subplot(4, 1, 3, sharex=self.ax1)
self.ax3.plot(self.dates, rh, 'g-', label='Relative Humidity')
self.ax3.legend(loc='upper center', bbox_to_anchor=(0.5, 1.22), prop={'size': 12})
self.ax3.grid(b=True, which='major', axis='y', color='k', linestyle='--',
linewidth=0.5)
self.ax3.set_ylim(plot_range[0], plot_range[1], plot_range[2])
self.ax3.fill_between(self.dates, rh, self.ax3.get_ylim()[0], color='g')
self.ax3.set_ylabel('Relative Humidity\n(%)', multialignment='center')
self.ax3.xaxis.set_major_formatter(mpl.dates.DateFormatter('%d/%H UTC'))
axtwin = self.ax3.twinx()
axtwin.set_ylim(plot_range[0], plot_range[1], plot_range[2])
def plot_pressure(self, p, plot_range=None):
"""
Required input:
p: Mean Sea Level Pressure (hPa)
Optional Input:
plot_range: Data range for making figure (list of (min,max,step))
"""
# PLOT PRESSURE
if not plot_range:
plot_range = [970, 1030, 2]
self.ax4 = fig.add_subplot(4, 1, 4, sharex=self.ax1)
self.ax4.plot(self.dates, p, 'm', label='Mean Sea Level Pressure')
self.ax4.set_ylabel('Mean Sea\nLevel Pressure\n(mb)', multialignment='center')
self.ax4.set_ylim(plot_range[0], plot_range[1], plot_range[2])
axtwin = self.ax4.twinx()
axtwin.set_ylim(plot_range[0], plot_range[1], plot_range[2])
axtwin.fill_between(self.dates, p, axtwin.get_ylim()[0], color='m')
axtwin.xaxis.set_major_formatter(mpl.dates.DateFormatter('%d/%H UTC'))
self.ax4.legend(loc='upper center', bbox_to_anchor=(0.5, 1.2), prop={'size': 12})
self.ax4.grid(b=True, which='major', axis='y', color='k', linestyle='--',
linewidth=0.5)
# OTHER OPTIONAL AXES TO PLOT
# plot_irradiance
# plot_precipitation
# set the starttime and endtime for plotting, 24 hour range
endtime = dt.datetime(2016, 3, 31, 22, 0, 0, 0)
starttime = endtime - dt.timedelta(hours=24)
# Height of the station to calculate MSLP
hgt_example = 292.
# Parse dates from .csv file, knowing their format as a string and convert to datetime
def parse_date(date):
return dt.datetime.strptime(date.decode('ascii'), '%Y-%m-%d %H:%M:%S')
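# parse_date turns e.g. b'2016-03-31 22:00:00' into datetime(2016, 3, 31, 22, 0, 0).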
testdata = np.genfromtxt(get_test_data('timeseries.csv', False), names=True, dtype=None,
usecols=list(range(1, 8)),
converters={'DATE': parse_date}, delimiter=',')
# Temporary variables for ease
temp = testdata['T']
pressure = testdata['P']
rh = testdata['RH']
ws = testdata['WS']
wsmax = testdata['WSMAX']
wd = testdata['WD']
date = testdata['DATE']
# ID For Plotting on Meteogram
probe_id = '0102A'
data = {'wind_speed': (np.array(ws) * units('m/s')).to(units('knots')),
'wind_speed_max': (np.array(wsmax) * units('m/s')).to(units('knots')),
'wind_direction': np.array(wd) * units('degrees'),
'dewpoint': dewpoint_from_relative_humidity((np.array(temp) * units.degC).to(units.K),
np.array(rh) / 100.).to(units('degF')),
'air_temperature': (np.array(temp) * units('degC')).to(units('degF')),
'mean_slp': calc_mslp(np.array(temp), np.array(pressure), hgt_example) * units('hPa'),
'relative_humidity': np.array(rh), 'times': np.array(date)}
fig = plt.figure(figsize=(20, 16))
add_metpy_logo(fig, 250, 180)
meteogram = Meteogram(fig, data['times'], probe_id)
meteogram.plot_winds(data['wind_speed'], data['wind_direction'], data['wind_speed_max'])
meteogram.plot_thermo(data['air_temperature'], data['dewpoint'])
meteogram.plot_rh(data['relative_humidity'])
meteogram.plot_pressure(data['mean_slp'])
fig.subplots_adjust(hspace=0.5)
plt.show()
| bsd-3-clause |
simon-pepin/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 142 | 18692 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
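# LSHForest approximates nearest-neighbor queries under the cosine distance
# (its hashing is based on random projections), which is why the exact
# reference models in these tests are built with metric='cosine'.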
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test a random point (not in the data set)
query = rng.randn(n_features)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
    # Checks whether returned distances are less than `radius`.
    # At least one point should be returned when `radius` is set to the
    # mean distance from the query point to the other points in the
    # database.
    # Moreover, this test compares the radius neighbors of LSHForest
    # with those of `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)]
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
    # Each query can return a different number of neighbors, so dists and
    # inds are object arrays holding one variable-length array per query
    # rather than rectangular 2D arrays.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)]
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [1., 0.]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
    # The first point is almost aligned with the query (very small angle),
    # so its cosine distance should be almost zero:
assert_almost_equal(dists[0], 0, decimal=5)
    # The second point forms an angle of 45 degrees with the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
    # The third point is orthogonal to the query vector, hence at a cosine
    # distance of exactly one:
assert_almost_equal(dists[2], 1)
    # The last point is almost collinear with the query but points in the
    # opposite direction, so its cosine 'distance' is very close to the
    # maximum possible value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
    # If we perform the same query with a slightly lower radius, the third
    # point of the dataset, which lay on the boundary of the previous query,
    # is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
        # Returned neighbors should be ordered from closest to farthest,
        # i.e. with non-decreasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
lshf.fit(X)
    # The fitted data stored in _fit_X should equal the training data X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
    # Checks whether the inserted array is consistent with the fitted data.
    # The `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
lshf.partial_fit(X)
assert_array_equal(X, lshf._fit_X)
lshf.fit(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
lshf.partial_fit(X_partial_fit)
    # _fit_X has n_samples + n_samples_partial_fit rows after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
    # original_indices_[0] now has n_samples + n_samples_partial_fit entries
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
    # each tree (e.g. trees_[1]) now has n_samples + n_samples_partial_fit entries
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
lshf.fit(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
    # This should handle the case when the number of candidates is 0.
    # The user should be warned when the number of candidates is less than
    # the requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
lshf.fit(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
| bsd-3-clause |
hainm/scikit-learn | sklearn/datasets/base.py | 196 | 18554 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import csv
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets
Dictionary-like object that exposes its keys as attributes.
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
>>> b.b
2
>>> b.a = 3
>>> b['a']
3
>>> b.c = 6
>>> b['c']
6
"""
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
def __setattr__(self, key, value):
self[key] = value
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __getstate__(self):
return self.__dict__
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
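# Illustrative usage of get_data_home (paths below are examples only):
#   get_data_home()                      # e.g. '/home/user/scikit_learn_data'
#   get_data_home('/tmp/sklearn_cache')  # explicit folder, created if missing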
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
    Individual samples are assumed to be files stored in a two-level folder
    structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
The folder names are used as supervised signal label names. The
individual file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use text files in a scikit-learn classification or clustering
algorithm, you will need to use the `sklearn.feature_extraction.text`
module to build a feature extraction transformer that suits your
problem.
If you set load_content=True, you should also specify the encoding of
the text using the 'encoding' parameter. For many modern text files,
'utf-8' will be the correct encoding. If you leave encoding equal to None,
then the content will be made of bytes instead of Unicode, and you will
not be able to use most functions in `sklearn.feature_extraction.text`.
    Similar feature extractors should be built for other kinds of unstructured
    data input such as images, audio, video, ...
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
    description : string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
    load_content : boolean, optional (default=True)
        Whether or not to load the content of the different files. If
        true, a 'data' attribute containing the text information is present
        in the data structure returned. If not, a 'filenames' attribute
        gives the path to the files.
encoding : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for
images or other non-text content).
If not None, encoding to use to decode text files to Unicode if
load_content is True.
    decode_error : {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
data, the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = []
for filename in filenames:
with open(filename, 'rb') as f:
data.append(f.read())
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
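# Illustrative call of load_files (hypothetical path; folder names become the
# supervised labels):
#   bunch = load_files('/path/to/container_folder', encoding='utf-8')
#   bunch.target_names  # e.g. ['category_1_folder', 'category_2_folder']
#   bunch.data[0]       # decoded text of the first (shuffled) file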
def load_iris():
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'iris.csv')) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float)
target[i] = np.asarray(ir[-1], dtype=np.int)
with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
def load_digits(n_class=10):
"""Load and return the digits dataset (classification).
    Each datapoint is an 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print(digits.data.shape)
(1797, 64)
>>> import pylab as pl #doctest: +SKIP
>>> pl.gray() #doctest: +SKIP
>>> pl.matshow(digits.images[0]) #doctest: +SKIP
>>> pl.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
with open(join(module_path, 'descr', 'digits.rst')) as f:
descr = f.read()
target = data[:, -1]
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
return Bunch(data=flat_data,
target=target.astype(np.int),
target_names=np.arange(10),
images=images,
DESCR=descr)
def load_diabetes():
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
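    Examples
    --------
    >>> from sklearn.datasets import load_diabetes
    >>> diabetes = load_diabetes()
    >>> print(diabetes.data.shape)
    (442, 10)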
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
return Bunch(data=data, target=target)
def load_linnerud():
"""Load and return the linnerud dataset (multivariate regression).
Samples total: 20
Dimensionality: 3 for both data and targets
Features: integer
Targets: integer
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data' and
'targets', the two multivariate datasets, with 'data' corresponding to
the exercise and 'targets' corresponding to the physiological
measurements, as well as 'feature_names' and 'target_names'.
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
def load_boston():
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
and 'DESCR', the full description of the dataset.
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> print(boston.data.shape)
(506, 13)
"""
module_path = dirname(__file__)
fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst')
with open(fdescr_name) as f:
descr_text = f.read()
data_file_name = join(module_path, 'data', 'boston_house_prices.csv')
with open(data_file_name) as f:
data_file = csv.reader(f)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
temp = next(data_file) # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float)
target[i] = np.asarray(d[-1], dtype=np.float)
return Bunch(data=data,
target=target,
# last column is target value
feature_names=feature_names[:-1],
DESCR=descr_text)
def load_sample_images():
"""Load sample images for image manipulation.
    Loads both ``china`` and ``flower``.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'images', the two sample images, 'filenames', the file
names for the images, and 'DESCR'
the full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Parameters
-----------
image_name: {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img: 3D array
The image as a numpy array: height x width x color
Examples
---------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
| bsd-3-clause |
bhargavasana/synthpop | demos/sfcta/synthesize_sfcta.py | 2 | 4155 |
# coding: utf-8
from sfcta_starter import SFCTAStarter
from sfcta_starter_hh import SFCTAStarterHouseholds
from sfcta_starter_gq import SFCTAStarterGroupQuarters
from synthpop.synthesizer import synthesize_all, enable_logging
import pandas as pd
import argparse
import os
import re
import sys
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Synthesize population given SFCTA-formatted input files.')
# parser.add_argument('census_api_key', help="Census API key.")
parser.add_argument('PUMA_data_dir', help="Location of PUMA data. E.g. Q:\Model Development\Population Synthesizer\2. Base Year Eval\PopGen input from ACS20082012\by_st_puma10")
parser.add_argument('fips_file', help="Census FIPS (Federal Information Processing Standards) file. Probably Q:\Data\Surveys\Census\PUMS&PUMA\national_county.txt")
parser.add_argument('controls_csv', help="Controls CSV file. Probably output by createControls.py in Q:\Model Development\Population Synthesizer\pythonlib")
parser.add_argument('--tazlist', help="A list of TAZs for which to synthesize the population. Comma-delimited, ranges ok. e.g. 1-10,12,20-30")
parser_args = parser.parse_args()
# This needs to end in a \
if parser_args.PUMA_data_dir[-1] != "\\":
parser_args.PUMA_data_dir = parser_args.PUMA_data_dir + "\\"
# No census API key needed since the files are local -- set it to a dummy
parser_args.census_api_key = "this_is_unused"
print "census_api_key = [%s]" % parser_args.census_api_key
print "PUMA_data_dir = [%s]" % parser_args.PUMA_data_dir
print "fips_file = [%s]" % parser_args.fips_file
print "controls_csv = [%s]" % parser_args.controls_csv
print "tazlist = [%s]" % parser_args.tazlist
# parse the TAZ set
taz_set = set()
if parser_args.tazlist != None:
range_re = re.compile("^(\d+)(\-(\d+))?$")
tazlist_str = parser_args.tazlist.split(",")
for taz_str in tazlist_str:
# each element must be either an int or a range
match = re.match(range_re, taz_str)
if match == None:
print "Don't understand tazlist argument '%s'" % parser_args.tazlist
print parser.format_help()
sys.exit(2)
if match.group(3) == None:
taz_set.add(int(match.group(1)))
else:
assert(int(match.group(3)) > int(match.group(1)))
taz_set.update(range(int(match.group(1)), int(match.group(3))+1))
print "taz_set = [%s]" % str(taz_set)
# enable_logging()
starter_hh = SFCTAStarterHouseholds(parser_args.census_api_key,
parser_args.controls_csv, taz_set,
parser_args.PUMA_data_dir, parser_args.fips_file,
write_households_csv="households.csv",
write_persons_csv="persons.csv")
households, people, fit_quality = synthesize_all(starter_hh, indexes=None)
gq_start_hhid = starter_hh.start_hhid
gq_start_persid = starter_hh.start_persid
# close the file
del starter_hh
starter_gq = SFCTAStarterGroupQuarters(parser_args.census_api_key,
parser_args.controls_csv, taz_set,
parser_args.PUMA_data_dir, parser_args.fips_file,
write_households_csv="households.csv",
write_persons_csv="persons.csv",
write_append=True,
start_hhid=gq_start_hhid,
start_persid=gq_start_persid)
households_gq, people_gq, fit_quality_gq = synthesize_all(starter_gq, indexes=None)
# close the file
del starter_gq
sys.exit()
for geo, qual in fit_quality.items():
print 'Geography: {}'.format(geo[0])
# print ' household chisq: {}'.format(qual.household_chisq)
# print ' household p: {}'.format(qual.household_p)
print ' people chisq: {}'.format(qual.people_chisq)
print ' people p: {}'.format(qual.people_p)
| bsd-3-clause |
rseubert/scikit-learn | examples/datasets/plot_random_multilabel_dataset.py | 93 | 3460 | """
==============================================
Plot randomly generated multilabel dataset
==============================================
This illustrates the `datasets.make_multilabel_classification` dataset
generator. Each sample consists of counts of two features (up to 50 in
total), which are differently distributed in each of two classes.
Points are labeled as follows, where Y means the class is present:
=====  =====  =====  ======
  1      2      3    Color
=====  =====  =====  ======
  Y      N      N    Red
  N      Y      N    Blue
  N      N      Y    Yellow
  Y      Y      N    Purple
  Y      N      Y    Orange
  N      Y      Y    Green
  Y      Y      Y    Brown
=====  =====  =====  ======
A star marks the expected sample for each class; its size reflects the
probability of selecting that class label.
The left and right examples highlight the ``n_labels`` parameter:
more of the samples in the right plot have 2 or 3 labels.
Note that this two-dimensional example is very degenerate:
generally the number of features would be much greater than the
"document length", while here we have much larger documents than vocabulary.
Similarly, with ``n_classes > n_features``, it is much less likely that a
feature distinguishes a particular class.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification as make_ml_clf
print(__doc__)
COLORS = np.array(['!',
'#FF3333', # red
'#0198E1', # blue
'#BF5FFF', # purple
'#FCD116', # yellow
'#FF7216', # orange
'#4DBD33', # green
'#87421F' # brown
])
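# The scatter color below is chosen by binary-encoding each label row as
# (Y * [1, 2, 4]).sum(axis=1): e.g. class {1} -> index 1 (red),
# classes {1, 2} -> 3 (purple), classes {1, 2, 3} -> 7 (brown) in COLORS.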
# Use same random seed for multiple calls to make_multilabel_classification to
# ensure same distributions
RANDOM_SEED = np.random.randint(2 ** 10)
def plot_2d(ax, n_labels=1, n_classes=3, length=50):
X, Y, p_c, p_w_c = make_ml_clf(n_samples=150, n_features=2,
n_classes=n_classes, n_labels=n_labels,
length=length, allow_unlabeled=False,
return_indicator=True,
return_distributions=True,
random_state=RANDOM_SEED)
ax.scatter(X[:, 0], X[:, 1], color=COLORS.take((Y * [1, 2, 4]
).sum(axis=1)),
marker='.')
ax.scatter(p_w_c[0] * length, p_w_c[1] * length,
marker='*', linewidth=.5, edgecolor='black',
s=20 + 1500 * p_c ** 2,
color=COLORS.take([1, 2, 4]))
ax.set_xlabel('Feature 0 count')
return p_c, p_w_c
_, (ax1, ax2) = plt.subplots(1, 2, sharex='row', sharey='row', figsize=(8, 4))
plt.subplots_adjust(bottom=.15)
p_c, p_w_c = plot_2d(ax1, n_labels=1)
ax1.set_title('n_labels=1, length=50')
ax1.set_ylabel('Feature 1 count')
plot_2d(ax2, n_labels=3)
ax2.set_title('n_labels=3, length=50')
ax2.set_xlim(left=0, auto=True)
ax2.set_ylim(bottom=0, auto=True)
plt.show()
print('The data was generated from (random_state=%d):' % RANDOM_SEED)
print('Class', 'P(C)', 'P(w0|C)', 'P(w1|C)', sep='\t')
for k, p, p_w in zip(['red', 'blue', 'yellow'], p_c, p_w_c.T):
print('%s\t%0.2f\t%0.2f\t%0.2f' % (k, p, p_w[0], p_w[1]))
| bsd-3-clause |
nsdf/nsdf | examples/moose_Multi/multi1.py | 1 | 17578 | # multi1.py ---
# Upi Bhalla, NCBS Bangalore 2014.
#
# Commentary:
#
# This loads in a low-detail model incorporating
# reac-diff and elec signaling in neurons. The reac-diff model
# has just Ca and CaM in it, and there are no-cross-compartment
# reactions though Ca diffuses everywhere. The elec model controls the
# Ca levels in the chem compartments.
# This version uses solvers for both chem and electrical parts.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
# Code:
import sys
#sys.path.append('../../python')
import os
os.environ['NUMPTHREADS'] = '1'
import math
import numpy
import matplotlib.pyplot as plt
import moose
import proto18
EREST_ACT = -70e-3
def loadElec():
library = moose.Neutral( '/library' )
moose.setCwe( '/library' )
proto18.make_Ca()
proto18.make_Ca_conc()
proto18.make_K_AHP()
proto18.make_K_C()
proto18.make_Na()
proto18.make_K_DR()
proto18.make_K_A()
proto18.make_glu()
proto18.make_NMDA()
proto18.make_Ca_NMDA()
proto18.make_NMDA_Ca_conc()
proto18.make_axon()
moose.setCwe( '/library' )
model = moose.Neutral( '/model' )
cellId = moose.loadModel( 'ca1_asym.p', '/model/elec', "Neutral" )
return cellId
def loadChem( diffLength ):
chem = moose.Neutral( '/model/chem' )
neuroCompt = moose.NeuroMesh( '/model/chem/kinetics' )
neuroCompt.separateSpines = 1
neuroCompt.geometryPolicy = 'cylinder'
spineCompt = moose.SpineMesh( '/model/chem/compartment_1' )
moose.connect( neuroCompt, 'spineListOut', spineCompt, 'spineList', 'OneToOne' )
psdCompt = moose.PsdMesh( '/model/chem/compartment_2' )
#print 'Meshvolume[neuro, spine, psd] = ', neuroCompt.mesh[0].volume, spineCompt.mesh[0].volume, psdCompt.mesh[0].volume
moose.connect( neuroCompt, 'psdListOut', psdCompt, 'psdList', 'OneToOne' )
modelId = moose.loadModel( 'minimal.g', '/model/chem', 'ee' )
#modelId = moose.loadModel( 'psd_merged31d.g', '/model/chem', 'ee' )
neuroCompt.name = 'dend'
spineCompt.name = 'spine'
psdCompt.name = 'psd'
def makeNeuroMeshModel():
diffLength = 10e-6 # Aim for 2 soma compartments.
elec = loadElec()
loadChem( diffLength )
neuroCompt = moose.element( '/model/chem/dend' )
neuroCompt.diffLength = diffLength
neuroCompt.cellPortion( elec, '/model/elec/#' )
for x in moose.wildcardFind( '/model/chem/##[ISA=PoolBase]' ):
if (x.diffConst > 0):
x.diffConst = 1e-11
for x in moose.wildcardFind( '/model/chem/##/Ca' ):
x.diffConst = 1e-10
# Put in dend solvers
ns = neuroCompt.numSegments
ndc = neuroCompt.numDiffCompts
print 'ns = ', ns, ', ndc = ', ndc
assert( neuroCompt.numDiffCompts == neuroCompt.mesh.num )
assert( ns == 36 ) #
assert( ndc == 278 ) #
nmksolve = moose.Ksolve( '/model/chem/dend/ksolve' )
nmdsolve = moose.Dsolve( '/model/chem/dend/dsolve' )
nmstoich = moose.Stoich( '/model/chem/dend/stoich' )
nmstoich.compartment = neuroCompt
nmstoich.ksolve = nmksolve
nmstoich.dsolve = nmdsolve
nmstoich.path = "/model/chem/dend/##"
print 'done setting path, numPools = ', nmdsolve.numPools
assert( nmdsolve.numPools == 1 )
assert( nmdsolve.numAllVoxels == ndc )
assert( nmstoich.numAllPools == 1 )
# oddly, numLocalFields does not work.
ca = moose.element( '/model/chem/dend/DEND/Ca' )
assert( ca.numData == ndc )
# Put in spine solvers. Note that these get info from the neuroCompt
spineCompt = moose.element( '/model/chem/spine' )
sdc = spineCompt.mesh.num
print 'sdc = ', sdc
assert( sdc == 13 )
smksolve = moose.Ksolve( '/model/chem/spine/ksolve' )
smdsolve = moose.Dsolve( '/model/chem/spine/dsolve' )
smstoich = moose.Stoich( '/model/chem/spine/stoich' )
smstoich.compartment = spineCompt
smstoich.ksolve = smksolve
smstoich.dsolve = smdsolve
smstoich.path = "/model/chem/spine/##"
print('spine num Pools = ', smstoich.numAllPools)
assert( smstoich.numAllPools == 3 )
assert( smdsolve.numPools == 3 )
assert( smdsolve.numAllVoxels == sdc )
# Put in PSD solvers. Note that these get info from the neuroCompt
psdCompt = moose.element( '/model/chem/psd' )
pdc = psdCompt.mesh.num
assert( pdc == 13 )
pmksolve = moose.Ksolve( '/model/chem/psd/ksolve' )
pmdsolve = moose.Dsolve( '/model/chem/psd/dsolve' )
pmstoich = moose.Stoich( '/model/chem/psd/stoich' )
pmstoich.compartment = psdCompt
pmstoich.ksolve = pmksolve
pmstoich.dsolve = pmdsolve
pmstoich.path = "/model/chem/psd/##"
assert( pmstoich.numAllPools == 3 )
assert( pmdsolve.numPools == 3 )
assert( pmdsolve.numAllVoxels == pdc )
foo = moose.element( '/model/chem/psd/Ca' )
print( 'PSD: numfoo = ', foo.numData)
print( 'PSD: numAllVoxels = ', pmksolve.numAllVoxels)
# Put in junctions between the diffusion solvers
nmdsolve.buildNeuroMeshJunctions( smdsolve, pmdsolve )
"""
CaNpsd = moose.vec( '/model/chem/psdMesh/PSD/PP1_PSD/CaN' )
print 'numCaN in PSD = ', CaNpsd.nInit, ', vol = ', CaNpsd.volume
CaNspine = moose.vec( '/model/chem/spine/SPINE/CaN_BULK/CaN' )
print 'numCaN in spine = ', CaNspine.nInit, ', vol = ', CaNspine.volume
"""
##################################################################
# set up adaptors
aCa = moose.Adaptor( '/model/chem/spine/adaptCa', sdc )
adaptCa = moose.vec( '/model/chem/spine/adaptCa' )
chemCa = moose.vec( '/model/chem/spine/Ca' )
#print 'aCa = ', aCa, ' foo = ', foo, "len( ChemCa ) = ", len( chemCa ), ", numData = ", chemCa.numData, "len( adaptCa ) = ", len( adaptCa )
assert( len( adaptCa ) == sdc )
assert( len( chemCa ) == sdc )
for i in range( sdc ):
elecCa = moose.element( '/model/elec/spine_head_14_' + str(i+1) + '/NMDA_Ca_conc' )
#print elecCa
moose.connect( elecCa, 'concOut', adaptCa[i], 'input', 'Single' )
moose.connect( adaptCa, 'output', chemCa, 'setConc', 'OneToOne' )
adaptCa.inputOffset = 0.0 #
adaptCa.outputOffset = 0.00008 # 80 nM offset in chem.
adaptCa.scale = 1e-4 # 520 to 0.0052 mM
#print adaptCa.outputOffset
moose.le( '/model/chem/dend/DEND' )
compts = neuroCompt.elecComptList
begin = neuroCompt.startVoxelInCompt
end = neuroCompt.endVoxelInCompt
aCa = moose.Adaptor( '/model/chem/dend/DEND/adaptCa', len( compts))
adaptCa = moose.vec( '/model/chem/dend/DEND/adaptCa' )
chemCa = moose.vec( '/model/chem/dend/DEND/Ca' )
#print 'aCa = ', aCa, ' foo = ', foo, "len( ChemCa ) = ", len( chemCa ), ", numData = ", chemCa.numData, "len( adaptCa ) = ", len( adaptCa )
assert( len( chemCa ) == ndc )
for i in zip( compts, adaptCa, begin, end ):
name = i[0].path + '/Ca_conc'
if ( moose.exists( name ) ):
elecCa = moose.element( name )
#print i[2], i[3], ' ', elecCa
#print i[1]
moose.connect( elecCa, 'concOut', i[1], 'input', 'Single' )
for j in range( i[2], i[3] ):
moose.connect( i[1], 'output', chemCa[j], 'setConc', 'Single' )
adaptCa.inputOffset = 0.0 #
adaptCa.outputOffset = 0.00008 # 80 nM offset in chem.
adaptCa.scale = 20e-6 # 10 arb units to 2 uM.
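# Note: the Adaptor objects above are assumed to map their input as
# output = outputOffset + scale * (input - inputOffset), i.e. the electrical
# Ca readings are offset and rescaled into chemical concentrations (mM).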
def addPlot( objpath, field, plot ):
#assert moose.exists( objpath )
if moose.exists( objpath ):
tab = moose.Table( '/graphs/' + plot )
obj = moose.element( objpath )
if obj.className == 'Neutral':
print( "addPlot failed: object is a Neutral: ", objpath)
return moose.element( '/' )
else:
#print "object was found: ", objpath, obj.className
moose.connect( tab, 'requestOut', obj, field )
return tab
else:
print( "addPlot failed: object not found: ", objpath)
return moose.element( '/' )
def makeCaPlots():
graphs = moose.Neutral( '/graphs' )
ca = moose.Neutral( '/graphs/ca' )
addPlot( '/model/elec/soma/Ca_conc', 'getCa', 'ca/somaCa' )
addPlot( '/model/elec/lat_11_2/Ca_conc', 'getCa', 'ca/lat11Ca' )
addPlot( '/model/elec/spine_head_14_4/NMDA_Ca_conc', 'getCa', 'ca/spine4Ca' )
addPlot( '/model/elec/spine_head_14_12/NMDA_Ca_conc', 'getCa', 'ca/spine12Ca' )
def makeElecPlots():
graphs = moose.Neutral( '/graphs' )
elec = moose.Neutral( '/graphs/elec' )
addPlot( '/model/elec/soma', 'getVm', 'elec/somaVm' )
addPlot( '/model/elec/spine_head_14_4', 'getVm', 'elec/spineVm' )
def makeChemPlots():
graphs = moose.Neutral( '/graphs' )
chem = moose.Neutral( '/graphs/chem' )
addPlot( '/model/chem/psd/Ca_CaM', 'getConc', 'chem/psdCaCam' )
addPlot( '/model/chem/psd/Ca', 'getConc', 'chem/psdCa' )
addPlot( '/model/chem/spine/Ca_CaM', 'getConc', 'chem/spineCaCam' )
addPlot( '/model/chem/spine/Ca[3]', 'getConc', 'chem/spine4Ca' )
addPlot( '/model/chem/spine/Ca[11]', 'getConc', 'chem/spine12Ca' )
addPlot( '/model/chem/dend/DEND/Ca', 'getConc', 'chem/dendCa' )
addPlot( '/model/chem/dend/DEND/Ca[20]', 'getConc', 'chem/dendCa20' )
def makeGraphics():
plt.ion()
fig = plt.figure( figsize=(10,16) )
chem = fig.add_subplot( 411 )
chem.set_ylim( 0, 0.006 )
plt.ylabel( 'Conc (mM)' )
plt.xlabel( 'time (seconds)' )
plt.legend()
elec = fig.add_subplot( 412 )
plt.ylabel( 'Vm (V)' )
plt.xlabel( 'time (seconds)' )
plt.legend()
ca = fig.add_subplot( 413 )
plt.ylabel( '[Ca] (mM)' )
plt.xlabel( 'time (seconds)' )
plt.legend()
lenplot = fig.add_subplot( 414 )
plt.ylabel( 'Ca (mM )' )
plt.xlabel( 'Voxel#)' )
plt.legend()
spineCa = moose.vec( '/model/chem/spine/Ca' )
dendCa = moose.vec( '/model/chem/dend/DEND/Ca' )
line1, = lenplot.plot( range( len( spineCa ) ), spineCa.conc, label='spine' )
line2, = lenplot.plot( range( len( dendCa ) ), dendCa.conc, label='dend' )
Ca = [ x.Ca * 0.0001 for x in moose.wildcardFind( '/model/elec/##[ISA=CaConcBase]') ]
line3, = lenplot.plot( range( len( Ca ) ), Ca, label='elec' )
spineCaM = moose.vec( '/model/chem/spine/Ca_CaM' )
line4, = lenplot.plot( range( len( spineCaM ) ), spineCaM.conc, label='spineCaM' )
psdCaM = moose.vec( '/model/chem/psd/Ca_CaM' )
line5, = lenplot.plot( range( len( psdCaM ) ), psdCaM.conc, label='psdCaM' )
lenplot.set_ylim( 0, 0.01 )
fig.canvas.draw()
return ( chem, elec, ca, lenplot, fig, line1, line2, line3, line4, line5 )
def find_max_voxel(): #added by Chaitanya
spineCa = len(moose.vec( '/model/chem/spine/Ca' ))
dendCa = len(moose.vec( '/model/chem/dend/DEND/Ca' ))
Ca = len([ x.Ca * 0.0001 for x in moose.wildcardFind( '/model/elec/##[ISA=CaConcBase]') ])
spineCaM = len(moose.vec( '/model/chem/spine/Ca_CaM' ))
psdCaM = len(moose.vec( '/model/chem/psd/Ca_CaM' ))
return max(spineCa, dendCa, Ca, spineCaM, psdCaM)
def save_NSDF( cPlotDt, ePlotDt, voxel_val_dict, vox_info ): #added by Chaitanya
sys.path.append('../..')
import nsdf
chem_sources = []
writer = nsdf.NSDFWriter('moose_multi.h5', mode='w')
data_obj = nsdf.UniformData('conc', unit='mM')
for x in moose.wildcardFind( '/graphs/chem/#[ISA=Table]' ):
chem_sources.append(x.name)
data_obj.put_data(x.name, x.vector)
chem_source_ds = writer.add_uniform_ds('chem', chem_sources)
data_obj.set_dt(cPlotDt, unit='s')
writer.add_uniform_data(chem_source_ds, data_obj)
data_obj = nsdf.UniformData('Vm', unit='V')
elec_sources = []
for x in moose.wildcardFind( '/graphs/elec/#[ISA=Table]' ):
elec_sources.append(x.name)
data_obj.put_data(x.name, x.vector)
elec_source_ds = writer.add_uniform_ds('elec', elec_sources)
data_obj.set_dt(ePlotDt, unit='s')
writer.add_uniform_data(elec_source_ds, data_obj)
data_obj = nsdf.UniformData('[Ca]', unit='mM')
ca_sources = []
for x in moose.wildcardFind( '/graphs/ca/#[ISA=Table]' ):
ca_sources.append(x.name)
data_obj.put_data(x.name, x.vector)
ca_source_ds = writer.add_uniform_ds('Ca', ca_sources)
data_obj.set_dt(ePlotDt, unit='s')
writer.add_uniform_data(ca_source_ds, data_obj)
h5 = writer._fd #Falling back to using native h5py operations. Multidimensional uniform dataset.
ds = h5.create_dataset('/data/uniform/chem/voxel', dtype=numpy.float32, shape=(vox_info[0], vox_info[1] ,len(voxel_val_dict)))
idx = 0
label_list = []
for ii,jj in voxel_val_dict.iteritems():
ds[:,:,idx] = jj
label_list.append(ii)
idx += 1
label_ds = h5.create_dataset('/map/uniform/spine_vox', data=label_list)
voxel_ds = h5.create_dataset('/map/uniform/vox_number', data=range(vox_info[0]))
tie_data_map(ds, label_ds, 'source', axis=2)
tie_data_map(ds, voxel_ds, 'voxel_number', axis=0)
ds.attrs.create('dt', data=vox_info[2])
ds.attrs.create('field', data='conc')
ds.attrs.create('tstart', data=0.0)
ds.attrs.create('unit', data='mM')
ds.attrs.create('timeunit', data='s')
def tie_data_map(d_set, m_set, name, axis=0): #Added by Chaitanya
d_set.dims[axis].label = name
m_set.make_scale(name)
d_set.dims[axis].attach_scale(m_set)
m_set.attrs.create('NAME', data='source')
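# Note: tie_data_map() attaches m_set to d_set as an h5py "dimension scale"
# named `name` on the given axis, so a reader of the NSDF file can recover,
# e.g., which source label each slice ds[:, :, k] corresponds to.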
def updateGraphics( plotlist, voxel_val_dict, idx ): #Edited by Chaitanya
spineCa = moose.vec( '/model/chem/spine/Ca' )
dendCa = moose.vec( '/model/chem/dend/DEND/Ca' )
plotlist[5].set_ydata( spineCa.conc )
plotlist[6].set_ydata( dendCa.conc )
ca = [ x.Ca * 0.0001 for x in moose.wildcardFind( '/model/elec/##[ISA=CaConcBase]') ]
plotlist[7].set_ydata( ca )
spineCaM = moose.vec( '/model/chem/spine/Ca_CaM' )
plotlist[8].set_ydata( spineCaM.conc )
psdCaM = moose.vec( '/model/chem/psd/Ca_CaM' )
plotlist[9].set_ydata( psdCaM.conc )
plotlist[4].canvas.draw()
voxel_val_dict['spine'][:len(spineCa.conc), idx] = spineCa.conc #
voxel_val_dict['dend'][:len(dendCa.conc), idx] = dendCa.conc #
voxel_val_dict['elec'][:len(ca), idx] = ca #
voxel_val_dict['spineCaM'][:len(spineCaM.conc), idx] = spineCaM.conc #
voxel_val_dict['psdCaM'][:len(psdCaM.conc), idx] = psdCaM.conc #
return voxel_val_dict #
def finalizeGraphics( plotlist, cPlotDt, ePlotDt ):
for x in moose.wildcardFind( '/graphs/chem/#[ISA=Table]' ):
pos = numpy.arange( 0, x.vector.size, 1 ) * cPlotDt
line1, = plotlist[0].plot( pos, x.vector, label=x.name )
for x in moose.wildcardFind( '/graphs/elec/#[ISA=Table]' ):
pos = numpy.arange( 0, x.vector.size, 1 ) * ePlotDt
line1, = plotlist[1].plot( pos, x.vector, label=x.name )
for x in moose.wildcardFind( '/graphs/ca/#[ISA=Table]' ):
pos = numpy.arange( 0, x.vector.size, 1 ) * ePlotDt
line1, = plotlist[2].plot( pos, x.vector, label=x.name )
plotlist[4].canvas.draw()
raw_input()
def testNeuroMeshMultiscale():
runtime = 0.5
#elecDt = 0.2e-6
elecDt = 10e-6
chemDt = 0.0025
ePlotDt = 0.5e-3
cPlotDt = 0.0025
plotName = 'nm.plot'
makeNeuroMeshModel()
print "after model is completely done"
for i in moose.wildcardFind( '/model/chem/#/#/#/transloc#' ):
print i[0].name, i[0].Kf, i[0].Kb, i[0].kf, i[0].kb
makeChemPlots()
makeElecPlots()
makeCaPlots()
for i in range (10):
moose.setClock( i, elecDt )
for i in range ( 10, 20 ):
moose.setClock( i, chemDt )
moose.setClock( 8, ePlotDt )
moose.setClock( 18, cPlotDt )
moose.useClock( 8, '/graphs/elec/#,/graphs/ca/#', 'process' )
moose.useClock( 18, '/graphs/chem/#', 'process' )
hsolve = moose.HSolve( '/model/elec/hsolve' )
hsolve.dt = elecDt
hsolve.target = '/model/elec/compt'
plotlist = makeGraphics()
moose.reinit()
moose.element( '/model/elec/soma' ).inject = 2e-10
moose.element( '/model/chem/psd/Ca' ).concInit = 0.001
moose.element( '/model/chem/spine/Ca' ).concInit = 0.002
moose.element( '/model/chem/dend/DEND/Ca' ).concInit = 0.003
moose.reinit()
numDivs = 200
partialRuntime = runtime / numDivs
max_voxel = find_max_voxel()
voxel_val_dict = {'spine':numpy.zeros((max_voxel, numDivs)),
'dend':numpy.zeros((max_voxel, numDivs)),
'elec':numpy.zeros((max_voxel, numDivs)),
'spineCaM':numpy.zeros((max_voxel, numDivs)),
'psdCaM':numpy.zeros((max_voxel, numDivs))}
for i in range( numDivs ):
moose.start( partialRuntime )
voxel_val_dict = updateGraphics( plotlist, voxel_val_dict, i ) #Edited by Chaitanya
# moose.element( '/model/elec/soma' ).inject = 0
# moose.start( 0.25 )
save_NSDF(cPlotDt, ePlotDt, voxel_val_dict, [max_voxel, numDivs, partialRuntime]) #Edited by Chaitanya
finalizeGraphics( plotlist, cPlotDt, ePlotDt )
def main():
testNeuroMeshMultiscale()
if __name__ == '__main__':
main()
#
# minimal.py ends here.
| gpl-3.0 |
cbertinato/pandas | pandas/tests/arrays/categorical/test_sorting.py | 1 | 5053 | import numpy as np
import pytest
from pandas import Categorical, Index
import pandas.util.testing as tm
class TestCategoricalSort:
def test_argsort(self):
c = Categorical([5, 3, 1, 4, 2], ordered=True)
expected = np.array([2, 4, 1, 3, 0])
tm.assert_numpy_array_equal(c.argsort(ascending=True), expected,
check_dtype=False)
expected = expected[::-1]
tm.assert_numpy_array_equal(c.argsort(ascending=False), expected,
check_dtype=False)
def test_numpy_argsort(self):
c = Categorical([5, 3, 1, 4, 2], ordered=True)
expected = np.array([2, 4, 1, 3, 0])
tm.assert_numpy_array_equal(np.argsort(c), expected,
check_dtype=False)
tm.assert_numpy_array_equal(np.argsort(c, kind='mergesort'), expected,
check_dtype=False)
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(c, axis=0)
msg = "the 'order' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(c, order='C')
def test_sort_values(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
tm.assert_numpy_array_equal(res.__array__(), exp)
tm.assert_index_equal(res.categories, cat.categories)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
tm.assert_numpy_array_equal(res.__array__(), exp)
tm.assert_index_equal(res.categories, cat.categories)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
tm.assert_numpy_array_equal(res.__array__(), exp)
tm.assert_index_equal(res.categories, cat.categories)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort_values(inplace=True)
exp = np.array(["a", "b", "c", "d"], dtype=object)
tm.assert_numpy_array_equal(cat1.__array__(), exp)
tm.assert_index_equal(res.categories, cat.categories)
# reverse
cat = Categorical(["a", "c", "c", "b", "d"], ordered=True)
res = cat.sort_values(ascending=False)
exp_val = np.array(["d", "c", "c", "b", "a"], dtype=object)
exp_categories = Index(["a", "b", "c", "d"])
tm.assert_numpy_array_equal(res.__array__(), exp_val)
tm.assert_index_equal(res.categories, exp_categories)
def test_sort_values_na_position(self):
# see gh-12882
cat = Categorical([5, 2, np.nan, 2, np.nan], ordered=True)
exp_categories = Index([2, 5])
exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan])
res = cat.sort_values() # default arguments
tm.assert_numpy_array_equal(res.__array__(), exp)
tm.assert_index_equal(res.categories, exp_categories)
exp = np.array([np.nan, np.nan, 2.0, 2.0, 5.0])
res = cat.sort_values(ascending=True, na_position='first')
tm.assert_numpy_array_equal(res.__array__(), exp)
tm.assert_index_equal(res.categories, exp_categories)
exp = np.array([np.nan, np.nan, 5.0, 2.0, 2.0])
res = cat.sort_values(ascending=False, na_position='first')
tm.assert_numpy_array_equal(res.__array__(), exp)
tm.assert_index_equal(res.categories, exp_categories)
exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan])
res = cat.sort_values(ascending=True, na_position='last')
tm.assert_numpy_array_equal(res.__array__(), exp)
tm.assert_index_equal(res.categories, exp_categories)
exp = np.array([5.0, 2.0, 2.0, np.nan, np.nan])
res = cat.sort_values(ascending=False, na_position='last')
tm.assert_numpy_array_equal(res.__array__(), exp)
tm.assert_index_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
exp_categories = Index(["a", "b", "c", "d"])
tm.assert_numpy_array_equal(res.__array__(), exp_val)
tm.assert_index_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
exp_categories = Index(["a", "b", "c", "d"])
tm.assert_numpy_array_equal(res.__array__(), exp_val)
tm.assert_index_equal(res.categories, exp_categories)
| bsd-3-clause |
thomasaarholt/hyperspy | hyperspy/tests/signal/test_image_contrast_editor_tool.py | 2 | 3985 | # -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import pytest
import hyperspy.api as hs
from hyperspy.signal_tools import ImageContrastEditor
class TestContrastEditorTool:
def setup_method(self, method):
s = hs.signals.Signal2D(np.arange(2*3*10*10).reshape(2, 3, 10, 10))
self.s = s
def test_reset_vmin_vmax(self):
s = self.s
s.plot(vmin='10th', vmax='99th')
ceditor = ImageContrastEditor(s._plot.signal_plot)
np.testing.assert_allclose(ceditor._vmin, 9.9)
np.testing.assert_allclose(ceditor._vmax, 98.01)
ceditor._vmin = 20
ceditor._vmax = 90
ceditor._reset_original_settings()
np.testing.assert_allclose(ceditor._vmin, 9.9)
np.testing.assert_allclose(ceditor._vmax, 98.01)
def test_reset_span_selector(self):
s = self.s
s.plot(vmin='10th', vmax='99th')
ceditor = ImageContrastEditor(s._plot.signal_plot)
ceditor.span_selector.set_initial((20, 90))
try:
ceditor.update_span_selector()
ax_image = s._plot.signal_plot.ax.images[0]
np.testing.assert_allclose(ax_image.norm.vmin, 20)
np.testing.assert_allclose(ax_image.norm.vmax, 90)
ceditor._reset_span_selector()
np.testing.assert_allclose(ax_image.norm.vmin, 9.9)
np.testing.assert_allclose(ax_image.norm.vmax, 98.01)
except TypeError as e:
# Failure sometimes seen with pytest-xdist (parallel tests)
# Message: `TypeError: restore_region() argument 1 must be matplotlib.backends._backend_agg.BufferRegion, not None`
# See e.g. https://github.com/hyperspy/hyperspy/issues/1688
# for a similar issue. Currently unclear what solution is.
pytest.skip(f"Skipping reset_span_selector test due to {e}")
def test_change_navigation_coordinate(self):
s = self.s
s.plot(vmin='10th', vmax='99th')
ceditor = ImageContrastEditor(s._plot.signal_plot)
np.testing.assert_allclose(ceditor._vmin, 9.9)
np.testing.assert_allclose(ceditor._vmax, 98.01)
try:
# Convenience to be able to run test on systems using backends
# supporting blit
s.axes_manager.indices = (1, 1)
except TypeError:
pass
np.testing.assert_allclose(ceditor._vmin, 409.9)
np.testing.assert_allclose(ceditor._vmax, 498.01)
def test_vmin_vmax_changed(self):
s = self.s
s.plot(vmin='0th', vmax='100th')
ceditor = ImageContrastEditor(s._plot.signal_plot)
np.testing.assert_allclose(ceditor._vmin, 0.0)
np.testing.assert_allclose(ceditor._vmax, 99.0)
try:
# Convenience to be able to run test on systems using backends
# supporting blit
ceditor._vmin_percentile_changed(0, 10)
except TypeError:
pass
try:
# Convenience to be able to run test on systems using backends
# supporting blit
ceditor._vmax_percentile_changed(100, 99)
except TypeError:
pass
np.testing.assert_allclose(ceditor._vmin, 9.9)
np.testing.assert_allclose(ceditor._vmax, 98.01)
| gpl-3.0 |
paulstey/sim_categorical | src/sim_model.py | 1 | 1458 | import pandas as pd
import numpy as np
student_prgm = pd.read_csv('../simdata/student_prgm.csv')
student_dem = pd.read_csv('../simdata/student_dem.csv')
grants = pd.read_csv('../simdata/grants.csv')
courses = pd.read_csv('../simdata/courses.csv')
apps = pd.read_csv('../simdata/apps.csv')
pubs = pd.read_csv('../simdata/pubs.csv')
student_prgm.head()
student_dem.head()
grants.head() # needs aggregation
courses.head() # needs aggregation
apps.head()
pubs.head() # needs aggregation
pubs_grp = pubs.groupby('student_id')
pubs_count = pubs_grp['journal'].count().reset_index()
pubs_max = pubs_grp['impact_factor'].max().reset_index()
pubs_mean = pubs_grp['impact_factor'].mean().reset_index()
def recode(x, old_new_dict):
    """Map each element of x through old_new_dict and return a NumPy array."""
    var_type = type(list(old_new_dict.values())[0])  # infer the dtype from the dictionary's values
n = len(x)
out = np.zeros(n, var_type)
for i in range(n):
out[i] = old_new_dict[x[i]]
return out
courses['grade_num'] = recode(courses['final_grade'], {'A': 4, 'B': 3, 'C': 2})
courses_grp = courses.groupby('student_id')
courses_count = courses_grp['course_num'].count().reset_index()
courses_gpa = courses_grp['grade_num'].mean().reset_index()
grants_grp = grants.groupby('student_id')
grants_count = grants_grp['amount'].count().reset_index()
grants_mean = grants_grp['amount'].mean().reset_index()
grants_max = grants_grp['amount'].max().reset_index()
| gpl-3.0 |
ngoix/OCRF | benchmarks/bench_orca.py | 1 | 4151 | """
==========================================
Orca benchmark
==========================================
A test of Orca on classical anomaly detection datasets.
"""
print(__doc__)
from time import time
import numpy as np
# import matplotlib.pyplot as plt
# for the cluster to save the fig:
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from sklearn.neighbors import Orca
from sklearn.metrics import roc_curve, precision_recall_curve, auc
from sklearn.datasets import one_class_data
from sklearn.utils import shuffle as sh
from scipy.interpolate import interp1d
from sklearn.utils import TimeoutError
np.random.seed(1)
nb_exp = 10
orca_max_train = 1000000
orca_max_test = 1000000
# # datasets available:
# datasets = ['http', 'smtp', 'SA', 'SF', 'shuttle', 'forestcover',
# 'ionosphere', 'spambase', 'annthyroid', 'arrhythmia',
# 'pendigits', 'pima', 'wilt','internet_ads', 'adult']
# continuous datasets:
datasets = ['http', 'smtp', 'shuttle', 'forestcover',
'ionosphere', 'spambase', 'annthyroid', 'arrhythmia',
'pendigits', 'pima', 'wilt', 'adult']
# # new datasets:
# datasets = ['ionosphere', 'spambase', 'annthyroid', 'arrhythmia',
# 'pendigits', 'pima', 'wilt', 'adult']
# datasets = ['ionosphere']
plt.figure(figsize=(25, 17))
for dat in datasets:
# loading and vectorization
X, y = one_class_data(dat)
n_samples, n_features = np.shape(X)
n_samples_train = n_samples // 2
# training on max ocsvm_max_train data:
n_samples_train = min(n_samples // 2, orca_max_train)
n_samples_test = min(n_samples - n_samples_train, orca_max_test)
n_axis = 1000
x_axis = np.linspace(0, 1, n_axis)
tpr = np.zeros(n_axis)
precision = np.zeros(n_axis)
fit_predict_time = 0
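    # ROC and PR curves from each of the nb_exp runs are interpolated onto the
    # common grid x_axis, accumulated here, and averaged after the loop.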
try:
for ne in range(nb_exp):
print 'exp num:', ne
X, y = sh(X, y)
X_train = X[:n_samples_train, :]
X_test = X[n_samples_train:(n_samples_train + n_samples_test), :]
y_train = y[:n_samples_train]
y_test = y[n_samples_train:(n_samples_train + n_samples_test)]
# # training only on normal data:
# X_train = X_train[y_train == 0]
# y_train = y_train[y_train == 0]
print('Orca processing...')
model = Orca()
tstart = time()
            # the lower, the more normal:
scoring = model.fit_predict(X_train, X_test)
fit_predict_time += time() - tstart
fpr_, tpr_, thresholds_ = roc_curve(y_test, scoring)
f = interp1d(fpr_, tpr_)
tpr += f(x_axis)
tpr[0] = 0.
precision_, recall_ = precision_recall_curve(y_test, scoring)[:2]
# cluster: old version of scipy -> interpol1d needs sorted x_input
arg_sorted = recall_.argsort()
recall_ = recall_[arg_sorted]
precision_ = precision_[arg_sorted]
f = interp1d(recall_, precision_)
precision += f(x_axis)
except TimeoutError:
continue
tpr /= float(nb_exp)
fit_predict_time /= float(nb_exp)
AUC = auc(x_axis, tpr)
precision /= float(nb_exp)
precision[0] = 1.
AUPR = auc(x_axis, precision)
plt.subplot(121)
plt.plot(x_axis, tpr, lw=1, label='%s (area = %0.3f, train+test-time: %0.2fs)' % (dat, AUC, fit_predict_time))
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate', fontsize=25)
plt.ylabel('True Positive Rate', fontsize=25)
plt.title('Receiver operating characteristic for Orca',
fontsize=25)
plt.legend(loc="lower right", prop={'size': 15})
plt.subplot(122)
plt.plot(x_axis, precision, lw=1, label='%s (area = %0.3f)'
% (dat, AUPR))
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('Recall', fontsize=25)
plt.ylabel('Precision', fontsize=25)
plt.title('Precision-Recall curve', fontsize=25)
plt.legend(loc="lower right", prop={'size': 15})
plt.savefig('results_ocrf/bench_orca_roc_pr_unsupervised_factorized')
| bsd-3-clause |
andrewnc/scikit-learn | examples/neighbors/plot_species_kde.py | 282 | 4059 | """
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`example_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn.neighbors import KernelDensity
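# Minimal haversine-KDE sketch (illustrative coordinates, not taken from the
# Phillips et al. dataset): KernelDensity with metric='haversine' expects
# [latitude, longitude] pairs expressed in radians and is backed by a BallTree.
_demo_latlon = np.radians([[-10.0, -60.0], [-9.5, -59.5]])
_demo_kde = KernelDensity(bandwidth=0.04, metric='haversine',
                          kernel='gaussian', algorithm='ball_tree').fit(_demo_latlon)
_demo_log_density = _demo_kde.score_samples(_demo_latlon)  # log-density on the unit sphere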
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']
Xtrain = np.vstack([data['train']['dd lat'],
data['train']['dd long']]).T
ytrain = np.array([d.decode('ascii').startswith('micro')
for d in data['train']['species']], dtype='int')
Xtrain *= np.pi / 180. # Convert lat/long to radians
# Set up the data grid for the contour plot
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.
# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
plt.subplot(1, 2, i + 1)
# construct a kernel density estimate of the distribution
print(" - computing KDE in spherical coordinates")
kde = KernelDensity(bandwidth=0.04, metric='haversine',
kernel='gaussian', algorithm='ball_tree')
kde.fit(Xtrain[ytrain == i])
# evaluate only on the land: -9999 indicates ocean
Z = -9999 + np.zeros(land_mask.shape[0])
Z[land_mask] = np.exp(kde.score_samples(xy))
Z = Z.reshape(X.shape)
# plot contours of the density
levels = np.linspace(0, Z.max(), 25)
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
plt.title(species_names[i])
plt.show()
| bsd-3-clause |
toobaz/pandas | asv_bench/benchmarks/join_merge.py | 1 | 11416 | import string
import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame, Series, MultiIndex, date_range, concat, merge, merge_asof
try:
from pandas import merge_ordered
except ImportError:
from pandas import ordered_merge as merge_ordered
class Append:
def setup(self):
self.df1 = DataFrame(np.random.randn(10000, 4), columns=["A", "B", "C", "D"])
self.df2 = self.df1.copy()
self.df2.index = np.arange(10000, 20000)
self.mdf1 = self.df1.copy()
self.mdf1["obj1"] = "bar"
self.mdf1["obj2"] = "bar"
self.mdf1["int1"] = 5
self.mdf1 = self.mdf1._consolidate()
self.mdf2 = self.mdf1.copy()
self.mdf2.index = self.df2.index
def time_append_homogenous(self):
self.df1.append(self.df2)
def time_append_mixed(self):
self.mdf1.append(self.mdf2)
class Concat:
params = [0, 1]
param_names = ["axis"]
def setup(self, axis):
N = 1000
s = Series(N, index=tm.makeStringIndex(N))
self.series = [s[i:-i] for i in range(1, 10)] * 50
self.small_frames = [DataFrame(np.random.randn(5, 4))] * 1000
df = DataFrame(
{"A": range(N)}, index=date_range("20130101", periods=N, freq="s")
)
self.empty_left = [DataFrame(), df]
self.empty_right = [df, DataFrame()]
self.mixed_ndims = [df, df.head(N // 2)]
def time_concat_series(self, axis):
concat(self.series, axis=axis, sort=False)
def time_concat_small_frames(self, axis):
concat(self.small_frames, axis=axis)
def time_concat_empty_right(self, axis):
concat(self.empty_right, axis=axis)
def time_concat_empty_left(self, axis):
concat(self.empty_left, axis=axis)
def time_concat_mixed_ndims(self, axis):
concat(self.mixed_ndims, axis=axis)
class ConcatDataFrames:
params = ([0, 1], [True, False])
param_names = ["axis", "ignore_index"]
def setup(self, axis, ignore_index):
frame_c = DataFrame(np.zeros((10000, 200), dtype=np.float32, order="C"))
self.frame_c = [frame_c] * 20
frame_f = DataFrame(np.zeros((10000, 200), dtype=np.float32, order="F"))
self.frame_f = [frame_f] * 20
def time_c_ordered(self, axis, ignore_index):
concat(self.frame_c, axis=axis, ignore_index=ignore_index)
def time_f_ordered(self, axis, ignore_index):
concat(self.frame_f, axis=axis, ignore_index=ignore_index)
class Join:
params = [True, False]
param_names = ["sort"]
def setup(self, sort):
level1 = tm.makeStringIndex(10).values
level2 = tm.makeStringIndex(1000).values
codes1 = np.arange(10).repeat(1000)
codes2 = np.tile(np.arange(1000), 10)
index2 = MultiIndex(levels=[level1, level2], codes=[codes1, codes2])
self.df_multi = DataFrame(
np.random.randn(len(index2), 4), index=index2, columns=["A", "B", "C", "D"]
)
self.key1 = np.tile(level1.take(codes1), 10)
self.key2 = np.tile(level2.take(codes2), 10)
self.df = DataFrame(
{
"data1": np.random.randn(100000),
"data2": np.random.randn(100000),
"key1": self.key1,
"key2": self.key2,
}
)
self.df_key1 = DataFrame(
np.random.randn(len(level1), 4), index=level1, columns=["A", "B", "C", "D"]
)
self.df_key2 = DataFrame(
np.random.randn(len(level2), 4), index=level2, columns=["A", "B", "C", "D"]
)
shuf = np.arange(100000)
np.random.shuffle(shuf)
self.df_shuf = self.df.reindex(self.df.index[shuf])
def time_join_dataframe_index_multi(self, sort):
self.df.join(self.df_multi, on=["key1", "key2"], sort=sort)
def time_join_dataframe_index_single_key_bigger(self, sort):
self.df.join(self.df_key2, on="key2", sort=sort)
def time_join_dataframe_index_single_key_small(self, sort):
self.df.join(self.df_key1, on="key1", sort=sort)
def time_join_dataframe_index_shuffle_key_bigger_sort(self, sort):
self.df_shuf.join(self.df_key2, on="key2", sort=sort)
class JoinIndex:
def setup(self):
N = 50000
self.left = DataFrame(
np.random.randint(1, N / 500, (N, 2)), columns=["jim", "joe"]
)
self.right = DataFrame(
np.random.randint(1, N / 500, (N, 2)), columns=["jolie", "jolia"]
).set_index("jolie")
def time_left_outer_join_index(self):
self.left.join(self.right, on="jim")
class JoinNonUnique:
# outer join of non-unique
# GH 6329
def setup(self):
date_index = date_range("01-Jan-2013", "23-Jan-2013", freq="T")
daily_dates = date_index.to_period("D").to_timestamp("S", "S")
self.fracofday = date_index.values - daily_dates.values
self.fracofday = self.fracofday.astype("timedelta64[ns]")
self.fracofday = self.fracofday.astype(np.float64) / 86400000000000.0
self.fracofday = Series(self.fracofday, daily_dates)
index = date_range(date_index.min(), date_index.max(), freq="D")
self.temp = Series(1.0, index)[self.fracofday.index]
def time_join_non_unique_equal(self):
self.fracofday * self.temp
class Merge:
params = [True, False]
param_names = ["sort"]
def setup(self, sort):
N = 10000
indices = tm.makeStringIndex(N).values
indices2 = tm.makeStringIndex(N).values
key = np.tile(indices[:8000], 10)
key2 = np.tile(indices2[:8000], 10)
self.left = DataFrame(
{"key": key, "key2": key2, "value": np.random.randn(80000)}
)
self.right = DataFrame(
{
"key": indices[2000:],
"key2": indices2[2000:],
"value2": np.random.randn(8000),
}
)
self.df = DataFrame(
{
"key1": np.tile(np.arange(500).repeat(10), 2),
"key2": np.tile(np.arange(250).repeat(10), 4),
"value": np.random.randn(10000),
}
)
self.df2 = DataFrame({"key1": np.arange(500), "value2": np.random.randn(500)})
self.df3 = self.df[:5000]
def time_merge_2intkey(self, sort):
merge(self.left, self.right, sort=sort)
def time_merge_dataframe_integer_2key(self, sort):
merge(self.df, self.df3, sort=sort)
def time_merge_dataframe_integer_key(self, sort):
merge(self.df, self.df2, on="key1", sort=sort)
class I8Merge:
params = ["inner", "outer", "left", "right"]
param_names = ["how"]
def setup(self, how):
low, high, n = -1000, 1000, 10 ** 6
self.left = DataFrame(
np.random.randint(low, high, (n, 7)), columns=list("ABCDEFG")
)
self.left["left"] = self.left.sum(axis=1)
self.right = self.left.sample(frac=1).rename({"left": "right"}, axis=1)
self.right = self.right.reset_index(drop=True)
self.right["right"] *= -1
def time_i8merge(self, how):
merge(self.left, self.right, how=how)
class MergeCategoricals:
def setup(self):
self.left_object = DataFrame(
{
"X": np.random.choice(range(0, 10), size=(10000,)),
"Y": np.random.choice(["one", "two", "three"], size=(10000,)),
}
)
self.right_object = DataFrame(
{
"X": np.random.choice(range(0, 10), size=(10000,)),
"Z": np.random.choice(["jjj", "kkk", "sss"], size=(10000,)),
}
)
self.left_cat = self.left_object.assign(
Y=self.left_object["Y"].astype("category")
)
self.right_cat = self.right_object.assign(
Z=self.right_object["Z"].astype("category")
)
def time_merge_object(self):
merge(self.left_object, self.right_object, on="X")
def time_merge_cat(self):
merge(self.left_cat, self.right_cat, on="X")
class MergeOrdered:
def setup(self):
groups = tm.makeStringIndex(10).values
self.left = DataFrame(
{
"group": groups.repeat(5000),
"key": np.tile(np.arange(0, 10000, 2), 10),
"lvalue": np.random.randn(50000),
}
)
self.right = DataFrame(
{"key": np.arange(10000), "rvalue": np.random.randn(10000)}
)
def time_merge_ordered(self):
merge_ordered(self.left, self.right, on="key", left_by="group")
class MergeAsof:
params = [["backward", "forward", "nearest"]]
param_names = ["direction"]
def setup(self, direction):
one_count = 200000
two_count = 1000000
df1 = DataFrame(
{
"time": np.random.randint(0, one_count / 20, one_count),
"key": np.random.choice(list(string.ascii_uppercase), one_count),
"key2": np.random.randint(0, 25, one_count),
"value1": np.random.randn(one_count),
}
)
df2 = DataFrame(
{
"time": np.random.randint(0, two_count / 20, two_count),
"key": np.random.choice(list(string.ascii_uppercase), two_count),
"key2": np.random.randint(0, 25, two_count),
"value2": np.random.randn(two_count),
}
)
df1 = df1.sort_values("time")
df2 = df2.sort_values("time")
df1["time32"] = np.int32(df1.time)
df2["time32"] = np.int32(df2.time)
self.df1a = df1[["time", "value1"]]
self.df2a = df2[["time", "value2"]]
self.df1b = df1[["time", "key", "value1"]]
self.df2b = df2[["time", "key", "value2"]]
self.df1c = df1[["time", "key2", "value1"]]
self.df2c = df2[["time", "key2", "value2"]]
self.df1d = df1[["time32", "value1"]]
self.df2d = df2[["time32", "value2"]]
self.df1e = df1[["time", "key", "key2", "value1"]]
self.df2e = df2[["time", "key", "key2", "value2"]]
def time_on_int(self, direction):
merge_asof(self.df1a, self.df2a, on="time", direction=direction)
def time_on_int32(self, direction):
merge_asof(self.df1d, self.df2d, on="time32", direction=direction)
def time_by_object(self, direction):
merge_asof(self.df1b, self.df2b, on="time", by="key", direction=direction)
def time_by_int(self, direction):
merge_asof(self.df1c, self.df2c, on="time", by="key2", direction=direction)
def time_multiby(self, direction):
merge_asof(
self.df1e, self.df2e, on="time", by=["key", "key2"], direction=direction
)
class Align:
def setup(self):
size = 5 * 10 ** 5
rng = np.arange(0, 10 ** 13, 10 ** 7)
stamps = np.datetime64("now").view("i8") + rng
idx1 = np.sort(np.random.choice(stamps, size, replace=False))
idx2 = np.sort(np.random.choice(stamps, size, replace=False))
self.ts1 = Series(np.random.randn(size), idx1)
self.ts2 = Series(np.random.randn(size), idx2)
def time_series_align_int64_index(self):
self.ts1 + self.ts2
def time_series_align_left_monotonic(self):
self.ts1.align(self.ts2, join="left")
from .pandas_vb_common import setup # noqa: F401
| bsd-3-clause |
darribas/pysal | pysal/contrib/viz/folium_mapping.py | 7 | 10440 | import numpy as np
import folium as fm
import pysal as ps
import pandas as pd
import geojson as gj
import os as os
from IPython.display import HTML
def inline_map(Map):
'''
Embeds the HTML source of the map directly into the IPython notebook.
This method will not work if the map depends on any files (json data). Also this uses
the HTML5 srcdoc attribute, which may not be supported in all browsers.
'''
Map._build_map()
    return HTML('<iframe srcdoc="{srcdoc}" style="width: 100%; height: 510px; border: none"></iframe>'.format(srcdoc=Map.HTML.replace('"', '&quot;')))
def build_features(shp, dbf):
'''
Builds a GeoJSON object from a PySAL shapefile and DBF object
shp - shapefile opened using pysal.open(file)
dbf - dbase table opened using pysal.open(file)
Only polygonal lattices are supported.
'''
shp_bak = ps.open(shp.dataPath)
dbf_bak = ps.open(dbf.dataPath)
chains = shp_bak.read()
dbftable = dbf_bak.read()
shp_bak.close()
dbf_bak.close()
#shptype = str(shp_bak.type).strip("<class 'pysal.cg.shapes.").strip("'>")
if 'Polygon' in str(shp_bak.type):
ftype = 'Polygon'
elif 'Point' in str(type(shp_bak.type)):
raise NotImplementedError('Point data is not implemented yet')
if ftype == "Polygon":
feats = []
for idx in range(len(chains)):
chain = chains[idx]
if len(chain.parts) > 1:
#shptype = 'MultiPolygon'
geom = gj.MultiPolygon([ [[ list(coord) for coord in part]] for part
in chain.parts])
else:
#shptype = 'Polygon'
geom = gj.Polygon(coordinates = [ [ list(coord) for coord in
part] for part in chain.parts])
prop = {head: val for head,val in zip(dbf_bak.header,
dbftable[idx])}
bbox = chain.bbox
feats.append(gj.Feature(None, geometry=geom, properties=prop, bbox=bbox))
return gj.FeatureCollection(feats, bbox = shp_bak.bbox )
def json2df(jsonobj, index_on = ''):
'''
Reads a json file and constructs a pandas dataframe from it.
jsonobj - the filepath to a JSON file.
index_on - a fieldname which the final pandas dataframe will be indexed on.
'''
n = len(jsonobj['features'])
rows = [ jsonobj['features'][i]['properties'] for i in range(n) ]
try:
idxs = [ jsonobj['features'][i]['properties'][index_on] for i in range(n) ]
result = pd.DataFrame(rows, index=idxs )
except KeyError:
result = pd.DataFrame(rows)
return result
def flip(fname, shp, dbf):
with open(fname, 'w') as out:
gj.dump(build_features(shp, dbf), out)
def bboxsearch(jsonobj):
'''
    Searches the coordinates of every feature in a GeoJSON object to construct
    its bounding box.
    jsonobj - a GeoJSON FeatureCollection, e.g. one built by build_features.
    Returns [min_x, min_y, max_x, max_y].
'''
max_x = -180
max_y = -90
min_x = 180
min_y = 90
for feat in jsonobj.features:
geom = feat.geometry.coordinates
for chain in geom:
for piece in chain:
if type(piece[0]) != float:
for point in piece:
if point[0] > max_x:
max_x = point[0]
elif point[0] < min_x:
min_x = point[0]
if point[1] > max_y:
max_y = point[1]
elif point[1] < min_y:
min_y = point[1]
else:
if piece[0] > max_x:
max_x = piece[0]
elif piece[0] < min_x:
min_x = piece[0]
if piece[1] > max_y:
max_y = piece[1]
elif piece[1] < min_y:
min_y = piece[1]
return [min_x, min_y, max_x, max_y]
def choropleth_map(jsonpath, key, attribute, df = None,
classification = "Quantiles", classes = 5, bins = None, std = None,
centroid = None, zoom_start = 5, tiles = 'OpenStreetMap',
fill_color = "YlGn", fill_opacity = .5,
line_opacity = 0.2, legend_name = '',
save = True):
'''
One-shot mapping function for folium-based choropleth mapping.
jsonpath - the filepath to a JSON file
key - the field upon which the JSON and the dataframe will be linked
attribute - the attribute to be mapped
    The remaining arguments are optional keyword arguments:
    classification - name of the classification scheme to be used (e.g. 'Quantiles')
    classes - number of classes into which the attribute is binned
    bins - explicit breakpoints, if a manual classification is desired
    std - standardize the attribute by dividing by a constant or by another column (given by name)
'''
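    # Usage sketch (hypothetical file and column names, for illustration only):
    #   choropleth_map('counties.json', key='GEOID', attribute='median_income',
    #                  classification='Quantiles', classes=5,
    #                  legend_name='Median income')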
#Polymorphism by hand...
if isinstance(jsonpath, str):
if os.path.isfile(jsonpath):
sjson = gj.load(open(jsonpath))
else:
raise IOError('File not found')
if isinstance(jsonpath, dict):
raise NotImplementedError('Direct mapping from dictionary not yet supported')
#with open('tmp.json', 'w') as out:
# gj.dump(jsonpath, out)
# sjson = gj.load(open('tmp.json'))
if isinstance(jsonpath, tuple):
if 'ShpWrapper' in str(type(jsonpath[0])) and 'DBF' in str(type(jsonpath[1])):
flip('tmp.json', jsonpath[0], jsonpath[1])
sjson = gj.load(open('tmp.json'))
jsonpath = 'tmp.json'
elif 'ShpWrapper' in str(type(jsonpath[1])) and 'DBF' in str(type(jsonpath[0])):
flip('tmp.json', jsonpath[1], jsonpath[0])
sjson = gj.load(open('tmp.json'))
jsonpath = 'tmp.json'
else:
raise IOError('Inputs must be GeoJSON filepath, GeoJSON dictionary in memory, or shp-dbf tuple')
#key construction
if df is None:
df = json2df(sjson)
dfkey = [key, attribute]
#centroid search
if centroid == None:
        if 'bbox' in sjson.keys():
            bbox = sjson.bbox
        else:
            # only scan all coordinates when the GeoJSON carries no bbox
            bbox = bboxsearch(sjson)
xs = sum([bbox[0], bbox[2]])/2.
ys = sum([bbox[1], bbox[3]])/2.
centroid = [ys, xs]
jsonkey = 'feature.properties.' + key
choromap = fm.Map(location = centroid, zoom_start = zoom_start, tiles=tiles) # all the elements you need to make a choropleth
#standardization
if std != None:
if isinstance(std, int) or isinstance(std, float):
y = np.array(df[attribute]/std)
elif type(std) == str:
y = np.array(df[attribute]/df[std])
elif callable(std):
raise NotImplementedError('Functional Standardizations are not implemented yet')
else:
raise ValueError('Standardization must be integer, float, function, or Series')
else:
y = np.array(df[attribute].tolist())
#For people who don't read documentation...
if isinstance(classes, list):
bins = classes
classes = len(bins)
elif isinstance(classes, float):
try:
classes = int(classes)
except:
raise ValueError('Classes must be coercable to integers')
#classification passing
if classification != None:
if classification == "Maximum Breaks": #there is probably a better way to do this, but it's a start.
mapclass = ps.Maximum_Breaks(y, k=classes).bins.tolist()
elif classification == 'Quantiles':
mapclass = ps.Quantiles(y, k=classes).bins.tolist()
elif classification == 'Fisher-Jenks':
mapclass = ps.Fisher_Jenks(y, k=classes).bins
elif classification == 'Equal Interval':
mapclass = ps.Equal_Interval(y, k=classes).bins.tolist()
elif classification == 'Natural Breaks':
mapclass = ps.Natural_Breaks (y, k=classes).bins
elif classification == 'Jenks Caspall Forced':
raise NotImplementedError('Jenks Caspall Forced is not implemented yet.')
# mapclass = ps.Jenks_Caspall_Forced(y, k=classes).bins.tolist()
elif classification == 'Jenks Caspall Sampled':
raise NotImplementedError('Jenks Caspall Sampled is not implemented yet')
# mapclass = ps.Jenks_Caspall_Sampled(y, k=classes).bins.tolist()
elif classification == 'Jenks Caspall':
mapclass = ps.Jenks_Caspall (y, k=classes).bins.tolist()
elif classification == 'User Defined':
mapclass = bins
elif classification == 'Standard Deviation':
if bins == None:
                half = classes // 2  # integer division so that range() receives ints
                bins = range(-half, half + 1)
mapclass = list(ps.Std_Mean(y, bins).bins)
else:
mapclass = list(ps.Std_Mean(y, bins).bins)
elif classification == 'Percentiles':
if bins == None:
bins = [1,10,50,90,99,100]
mapclass = list(ps.Percentiles(y, bins).bins)
else:
mapclass = list(ps.Percentiles(y, bins).bins)
elif classification == 'Max P':
#raise NotImplementedError('Max-P classification is not implemented yet')
mapclass = ps.Max_P_Classifier(y, k=classes).bins.tolist()
else:
raise NotImplementedError('Your classification is not supported or was not found. Supported classifications are:\n "Maximum Breaks"\n "Quantiles"\n "Fisher-Jenks"\n "Equal Interval"\n "Natural Breaks"\n "Jenks Caspall"\n "User Defined"\n "Percentiles"\n "Max P"')
else:
        print('Classification forced to None. Defaulting to Quantiles')
mapclass = ps.Quantiles(y, k=classes).bins.tolist()
#folium call, try abstracting to a "mapper" function, passing list of args
choromap.geo_json(geo_path=jsonpath, key_on = jsonkey,
data = df, columns = dfkey,
fill_color = fill_color, fill_opacity = fill_opacity,
line_opacity = line_opacity, threshold_scale = mapclass[:-1] , legend_name = legend_name
)
if save:
fname = jsonpath.rstrip('.json') + '_' + attribute + '.html'
choromap.create_map(fname)
return inline_map(choromap)
| bsd-3-clause |
jeanfeydy/lddmm-ot | LDDMM_Python/lddmm_python/lib/plotly/matplotlylib/mplexporter/tools.py | 75 | 1732 | """
Tools for matplotlib plot exporting
"""
def ipynb_vega_init():
"""Initialize the IPython notebook display elements
This function borrows heavily from the excellent vincent package:
http://github.com/wrobstory/vincent
"""
try:
from IPython.core.display import display, HTML
except ImportError:
print('IPython Notebook could not be loaded.')
require_js = '''
if (window['d3'] === undefined) {{
require.config({{ paths: {{d3: "http://d3js.org/d3.v3.min"}} }});
require(["d3"], function(d3) {{
window.d3 = d3;
{0}
}});
}};
if (window['topojson'] === undefined) {{
require.config(
{{ paths: {{topojson: "http://d3js.org/topojson.v1.min"}} }}
);
require(["topojson"], function(topojson) {{
window.topojson = topojson;
}});
}};
'''
d3_geo_projection_js_url = "http://d3js.org/d3.geo.projection.v0.min.js"
d3_layout_cloud_js_url = ("http://wrobstory.github.io/d3-cloud/"
"d3.layout.cloud.js")
topojson_js_url = "http://d3js.org/topojson.v1.min.js"
vega_js_url = 'http://trifacta.github.com/vega/vega.js'
dep_libs = '''$.getScript("%s", function() {
$.getScript("%s", function() {
$.getScript("%s", function() {
$.getScript("%s", function() {
$([IPython.events]).trigger("vega_loaded.vincent");
})
})
})
});''' % (d3_geo_projection_js_url, d3_layout_cloud_js_url,
topojson_js_url, vega_js_url)
load_js = require_js.format(dep_libs)
html = '<script>'+load_js+'</script>'
display(HTML(html))
| mit |
tboch/mocpy | mocpy/moc/plot/fill.py | 1 | 3539 | import numpy as np
from astropy.coordinates import ICRS, SkyCoord
import cdshealpix
from astropy.wcs.utils import skycoord_to_pixel
from matplotlib.path import Path
from matplotlib.patches import PathPatch
from .utils import build_plotting_moc
from . import culling_backfacing_cells
from . import axis_viewport
def compute_healpix_vertices(depth, ipix, wcs):
path_vertices = np.array([])
codes = np.array([])
depth = int(depth)
step = 1
if depth < 3:
step = 2
ipix_lon, ipix_lat = cdshealpix.vertices(ipix, depth)
ipix_lon = ipix_lon[:, [2, 3, 0, 1]]
ipix_lat = ipix_lat[:, [2, 3, 0, 1]]
ipix_boundaries = SkyCoord(ipix_lon, ipix_lat, frame=ICRS())
# Projection on the given WCS
xp, yp = skycoord_to_pixel(ipix_boundaries, wcs=wcs)
c1 = np.vstack((xp[:, 0], yp[:, 0])).T
c2 = np.vstack((xp[:, 1], yp[:, 1])).T
c3 = np.vstack((xp[:, 2], yp[:, 2])).T
c4 = np.vstack((xp[:, 3], yp[:, 3])).T
# if depth < 3:
# c5 = np.vstack((xp[:, 4], yp[:, 4])).T
# c6 = np.vstack((xp[:, 5], yp[:, 5])).T
# c7 = np.vstack((xp[:, 6], yp[:, 6])).T
# c8 = np.vstack((xp[:, 7], yp[:, 7])).T
# cells = np.hstack((c1, c2, c3, c4, c5, c6, c7, c8, np.zeros((c1.shape[0], 2))))
# path_vertices = cells.reshape((9*c1.shape[0], 2))
# single_code = np.array([Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
# else:
cells = np.hstack((c1, c2, c3, c4, np.zeros((c1.shape[0], 2))))
path_vertices = cells.reshape((5*c1.shape[0], 2))
single_code = np.array([Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
codes = np.tile(single_code, c1.shape[0])
return path_vertices, codes
def compute_the_patches(moc, wcs):
depth_ipix_d = moc.serialize(format="json")
depth_ipix_clean_d = culling_backfacing_cells.from_moc(depth_ipix_d=depth_ipix_d, wcs=wcs)
patches = []
for depth, ipix in depth_ipix_clean_d.items():
patch = compute_healpix_vertices(depth=depth,
ipix=ipix,
wcs=wcs)
patches.append(patch)
return patches
def add_patches_to_mpl_axe(patches, ax, wcs, **kw_mpl_pathpatch):
first_patch = patches[0]
vertices_first_patch, codes_first_patch = first_patch
path_vertices = np.array(vertices_first_patch)
path_codes = np.array(codes_first_patch)
for vertices, codes in patches[1:]:
path_vertices = np.vstack((path_vertices, vertices))
path_codes = np.hstack((path_codes, codes))
path = Path(path_vertices, path_codes)
patches_mpl = PathPatch(path, **kw_mpl_pathpatch)
# Add the patches to the mpl axis
ax.add_patch(patches_mpl)
axis_viewport.set(ax, wcs)
def fill(moc, ax, wcs, **kw_mpl_pathpatch):
# Simplify the MOC for plotting purposes:
# 1. Degrade the MOC if the FOV is enough big so that we cannot see the smallest HEALPix cells.
# 2. For small FOVs, plot the MOC & POLYGONAL_MOC_FROM_FOV.
moc_to_plot = build_plotting_moc(moc=moc, wcs=wcs)
# If the FOV contains no cells, then moc_to_plot (i.e. the intersection between the moc
# and the MOC created from the FOV polygon) will be empty.
# If it is the case, we exit the method without doing anything.
if not moc_to_plot.empty():
patches = compute_the_patches(moc=moc_to_plot, wcs=wcs)
add_patches_to_mpl_axe(patches=patches, ax=ax, wcs=wcs, **kw_mpl_pathpatch)
| gpl-3.0 |
bennlich/scikit-image | skimage/viewer/qt.py | 48 | 1281 | _qt_version = None
has_qt = True
try:
from matplotlib.backends.qt_compat import QtGui, QtCore, QtWidgets, QT_RC_MAJOR_VERSION as _qt_version
except ImportError:
try:
from matplotlib.backends.qt4_compat import QtGui, QtCore
QtWidgets = QtGui
_qt_version = 4
except ImportError:
# Mock objects
class QtGui_cls(object):
QMainWindow = object
QDialog = object
QWidget = object
class QtCore_cls(object):
class Qt(object):
TopDockWidgetArea = None
BottomDockWidgetArea = None
LeftDockWidgetArea = None
RightDockWidgetArea = None
def Signal(self, *args, **kwargs):
pass
QtGui = QtWidgets = QtGui_cls()
QtCore = QtCore_cls()
has_qt = False
if _qt_version == 5:
from matplotlib.backends.backend_qt5 import FigureManagerQT
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
elif _qt_version == 4:
from matplotlib.backends.backend_qt4 import FigureManagerQT
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg
else:
FigureManagerQT = object
FigureCanvasQTAgg = object
Qt = QtCore.Qt
Signal = QtCore.Signal
| bsd-3-clause |
jseabold/statsmodels | examples/python/statespace_sarimax_internet.py | 5 | 5711 | # coding: utf-8
# DO NOT EDIT
# Autogenerated from the notebook statespace_sarimax_internet.ipynb.
# Edit the notebook and then sync the output with this file.
#
# flake8: noqa
# DO NOT EDIT
# # SARIMAX: Model selection, missing data
# The example mirrors Durbin and Koopman (2012), Chapter 8.4, in its
# application of the Box-Jenkins methodology to fit ARMA models. The novel
# feature is the ability of the model to work on datasets with missing
# values.
import numpy as np
import pandas as pd
from scipy.stats import norm
import statsmodels.api as sm
import matplotlib.pyplot as plt
import requests
from io import BytesIO
from zipfile import ZipFile
# Download the dataset
dk = requests.get('http://www.ssfpack.com/files/DK-data.zip').content
f = BytesIO(dk)
zipped = ZipFile(f)
df = pd.read_table(
BytesIO(zipped.read('internet.dat')),
skiprows=1,
header=None,
    sep=r'\s+',
engine='python',
names=['internet', 'dinternet'])
# ### Model Selection
#
# As in Durbin and Koopman, we force a number of the values to be missing.
# Get the basic series
dta_full = df.dinternet[1:].values
dta_miss = dta_full.copy()
# Remove datapoints
missing = np.r_[6, 16, 26, 36, 46, 56, 66, 72, 73, 74, 75, 76, 86, 96] - 1
dta_miss[missing] = np.nan
# Then we can consider model selection using the Akaike information
# criteria (AIC), but running the model for each variant and selecting the
# model with the lowest AIC value.
#
# There are a couple of things to note here:
#
# - When running such a large batch of models, particularly when the
# autoregressive and moving average orders become large, there is the
# possibility of poor maximum likelihood convergence. Below we ignore the
# warnings since this example is illustrative.
# - We use the option `enforce_invertibility=False`, which allows the
# moving average polynomial to be non-invertible, so that more of the models
# are estimable.
# - Several of the models do not produce good results, and their AIC value
# is set to NaN. This is not surprising, as Durbin and Koopman note
# numerical problems with the high order models.
import warnings
aic_full = pd.DataFrame(np.zeros((6, 6), dtype=float))
aic_miss = pd.DataFrame(np.zeros((6, 6), dtype=float))
warnings.simplefilter('ignore')
# Iterate over all ARMA(p,q) models with p,q in [0,6]
for p in range(6):
for q in range(6):
if p == 0 and q == 0:
continue
# Estimate the model with no missing datapoints
mod = sm.tsa.statespace.SARIMAX(
dta_full, order=(p, 0, q), enforce_invertibility=False)
try:
res = mod.fit(disp=False)
aic_full.iloc[p, q] = res.aic
except:
aic_full.iloc[p, q] = np.nan
# Estimate the model with missing datapoints
mod = sm.tsa.statespace.SARIMAX(
dta_miss, order=(p, 0, q), enforce_invertibility=False)
try:
res = mod.fit(disp=False)
aic_miss.iloc[p, q] = res.aic
except:
aic_miss.iloc[p, q] = np.nan
# For the models estimated over the full (non-missing) dataset, the AIC
# chooses ARMA(1,1) or ARMA(3,0). Durbin and Koopman suggest the ARMA(1,1)
# specification is better due to parsimony.
#
# $$
# \text{Replication of:}\\
# \textbf{Table 8.1} ~~ \text{AIC for different ARMA models.}\\
# \newcommand{\r}[1]{{\color{red}{#1}}}
# \begin{array}{lrrrrrr}
# \hline
# q & 0 & 1 & 2 & 3 & 4 & 5 \\
# \hline
# p & {} & {} & {} & {} & {} & {} \\
# 0 & 0.00 & 549.81 & 519.87 & 520.27 & 519.38 & 518.86 \\
# 1 & 529.24 & \r{514.30} & 516.25 & 514.58 & 515.10 & 516.28 \\
# 2 & 522.18 & 516.29 & 517.16 & 515.77 & 513.24 & 514.73 \\
# 3 & \r{511.99} & 513.94 & 515.92 & 512.06 & 513.72 & 514.50 \\
# 4 & 513.93 & 512.89 & nan & nan & 514.81 & 516.08 \\
# 5 & 515.86 & 517.64 & nan & nan & nan & nan \\
# \hline
# \end{array}
# $$
#
# For the models estimated over missing dataset, the AIC chooses ARMA(1,1)
#
# $$
# \text{Replication of:}\\
# \textbf{Table 8.2} ~~ \text{AIC for different ARMA models with missing
# observations.}\\
# \begin{array}{lrrrrrr}
# \hline
# q & 0 & 1 & 2 & 3 & 4 & 5 \\
# \hline
# p & {} & {} & {} & {} & {} & {} \\
# 0 & 0.00 & 488.93 & 464.01 & 463.86 & 462.63 & 463.62 \\
# 1 & 468.01 & \r{457.54} & 459.35 & 458.66 & 459.15 & 461.01 \\
# 2 & 469.68 & nan & 460.48 & 459.43 & 459.23 & 460.47 \\
# 3 & 467.10 & 458.44 & 459.64 & 456.66 & 459.54 & 460.05 \\
# 4 & 469.00 & 459.52 & nan & 463.04 & 459.35 & 460.96 \\
# 5 & 471.32 & 461.26 & nan & nan & 461.00 & 462.97 \\
# \hline
# \end{array}
# $$
#
# **Note**: the AIC values are calculated differently than in Durbin and
# Koopman, but show overall similar trends.
# ### Postestimation
#
# Using the ARMA(1,1) specification selected above, we perform in-sample
# prediction and out-of-sample forecasting.
# Statespace
mod = sm.tsa.statespace.SARIMAX(dta_miss, order=(1, 0, 1))
res = mod.fit(disp=False)
print(res.summary())
# In-sample one-step-ahead predictions, and out-of-sample forecasts
nforecast = 20
predict = res.get_prediction(end=mod.nobs + nforecast)
idx = np.arange(len(predict.predicted_mean))
predict_ci = predict.conf_int(alpha=0.5)
# Graph
fig, ax = plt.subplots(figsize=(12, 6))
ax.xaxis.grid()
ax.plot(dta_miss, 'k.')
# Plot
ax.plot(idx[:-nforecast], predict.predicted_mean[:-nforecast], 'gray')
ax.plot(
idx[-nforecast:],
predict.predicted_mean[-nforecast:],
'k--',
linestyle='--',
linewidth=2)
ax.fill_between(idx, predict_ci[:, 0], predict_ci[:, 1], alpha=0.15)
ax.set(title='Figure 8.9 - Internet series')
| bsd-3-clause |
joergdietrich/astropy | docs/conf.py | 1 | 9437 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# See astropy.sphinx.conf for which values are set there.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory (if "python setup.py build_docs" is used).
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
import os
ON_RTD = os.environ.get('READTHEDOCS') == 'True'
ON_TRAVIS = os.environ.get('TRAVIS') == 'true'
try:
import astropy_helpers
except ImportError:
# Building from inside the docs/ directory?
import os
import sys
if os.path.basename(os.getcwd()) == 'docs':
a_h_path = os.path.abspath(os.path.join('..', 'astropy_helpers'))
if os.path.isdir(a_h_path):
sys.path.insert(1, a_h_path)
# If that doesn't work trying to import from astropy_helpers below will
# still blow up
# Load all of the global Astropy configuration
from astropy_helpers.sphinx.conf import *
from astropy.extern import six
import astropy
# Use the astropy style when building docs
from astropy import visualization
plot_rcparams = visualization.astropy_mpl_docs_style
plot_apply_rcparams = True
plot_html_show_source_link = False
plot_formats = ['png', 'svg', 'pdf']
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.1'
# To perform a Sphinx version check that needs to be more specific than
# major.minor, call `check_sphinx_version("x.y.z")` here.
check_sphinx_version("1.2.1")
# The intersphinx_mapping in astropy_helpers.sphinx.conf refers to astropy for
# the benefit of affiliated packages who want to refer to objects in the
# astropy core. However, we don't want to cyclically reference astropy in its
# own build so we remove it here.
del intersphinx_mapping['astropy']
# add any custom intersphinx for astropy
intersphinx_mapping['pytest'] = ('http://pytest.org/latest/', None)
intersphinx_mapping['ipython'] = ('http://ipython.readthedocs.io/en/stable/', None)
intersphinx_mapping['pandas'] = ('http://pandas.pydata.org/pandas-docs/stable/', None)
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns.append('_templates')
exclude_patterns.append('_pkgtemplate.rst')
# Add any paths that contain templates here, relative to this directory.
if 'templates_path' not in locals(): # in case parent conf.py defines it
templates_path = []
templates_path.append('_templates')
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
rst_epilog += """
.. |minimum_numpy_version| replace:: {0.__minimum_numpy_version__}
.. Astropy
.. _Astropy: http://astropy.org
.. _`Astropy mailing list`: https://mail.python.org/mailman/listinfo/astropy
.. _`astropy-dev mailing list`: http://groups.google.com/group/astropy-dev
""".format(astropy)
# -- Project information ------------------------------------------------------
project = u'Astropy'
author = u'The Astropy Developers'
copyright = u'2011-2016, ' + author
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The short X.Y version.
version = astropy.__version__.split('-', 1)[0]
# The full version, including alpha/beta/rc tags.
release = astropy.__version__
# -- Options for HTML output ---------------------------------------------------
# A NOTE ON HTML THEMES
#
# The global astropy configuration uses a custom theme,
# 'bootstrap-astropy', which is installed along with astropy. The
# theme has options for controlling the text of the logo in the upper
# left corner. This is how you would specify the options in order to
# override the theme defaults (The following options *are* the
# defaults, so we do not actually need to set them here.)
#html_theme_options = {
# 'logotext1': 'astro', # white, semi-bold
# 'logotext2': 'py', # orange, light
# 'logotext3': ':docs' # white, light
# }
# A different theme can be used, or other parts of this theme can be
# modified, by overriding some of the variables set in the global
# configuration. The variables set in the global configuration are
# listed below, commented out.
# Add any paths that contain custom themes here, relative to this directory.
# To use a different custom theme, add the directory containing the theme.
#html_theme_path = []
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. To override the custom theme, set this to the
# name of a builtin theme or the name of a custom theme in html_theme_path.
#html_theme = None
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = ''
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = ''
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = '{0} v{1}'.format(project, release)
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', project + '.tex', project + u' Documentation',
author, 'manual')]
latex_logo = '_static/astropy_logo.pdf'
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', project.lower(), project + u' Documentation',
[author], 1)]
# -- Options for the edit_on_github extension ----------------------------------------
extensions += ['astropy_helpers.sphinx.ext.edit_on_github']
# Don't import the module as "version" or it will override the
# "version" configuration parameter
from astropy import version as versionmod
edit_on_github_project = "astropy/astropy"
if versionmod.release:
edit_on_github_branch = "v{0}.{1}.x".format(
versionmod.major, versionmod.minor)
else:
edit_on_github_branch = "master"
edit_on_github_source_root = ""
edit_on_github_doc_root = "docs"
edit_on_github_skip_regex = '_.*|api/.*'
github_issues_url = 'https://github.com/astropy/astropy/issues/'
# Enable nitpicky mode - which ensures that all references in the docs
# resolve.
nitpicky = True
nitpick_ignore = []
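# Each line of nitpick-exceptions has the form "<reference-type> <target>";
# blank lines and lines starting with "#" are skipped.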
for line in open('nitpick-exceptions'):
if line.strip() == "" or line.startswith("#"):
continue
dtype, target = line.split(None, 1)
target = target.strip()
nitpick_ignore.append((dtype, six.u(target)))
if six.PY2:
nitpick_ignore.extend([('py:obj', six.u('bases'))])
# -- Options for the Sphinx gallery -------------------------------------------
try:
import sphinx_gallery
extensions += ["sphinx_gallery.gen_gallery"]
sphinx_gallery_conf = {
'mod_example_dir': 'generated/modules', # path to store the module using example template
'filename_pattern': '^((?!skip_).)*$', # execute all examples except those that start with "skip_"
'examples_dirs': '..{}examples'.format(os.sep), # path to the examples scripts
'gallery_dirs': 'generated/examples', # path to save gallery generated examples
'reference_url': {
'astropy': None,
'matplotlib': 'http://matplotlib.org/',
'numpy': 'http://docs.scipy.org/doc/numpy/',
},
'abort_on_example_error': True
}
except ImportError:
def setup(app):
app.warn('The sphinx_gallery extension is not installed, so the '
'gallery will not be built. You will probably see '
'additional warnings about undefined references due '
'to this.')
| bsd-3-clause |
kingtaurus/rs_binary_reader | read_binary.py | 1 | 4871 | import io
import os
import sys
from struct import pack, unpack, calcsize
#
import re
numbers = re.compile('([0-9]+)')
import numpy as np
import matplotlib.pyplot as plt
# try:
# import seaborn
# except ImportError as e:
# print("seaborn is not available!")
import glob
def numerical_match_first(value):
if len(numbers.findall(os.path.basename(value))) < 1:
raise ValueError('Value = ' + value + " doesn't contain a number")
parts = int( numbers.findall(os.path.basename(value))[0] )
return parts
#I <---> unsigned 32 bit int
struct_format = 'IdI'
struct_format_uint32 = 'I'
struct_format_doses = 'd'
struct_format_indices = 'I'
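# Each beamlet file is laid out as: a uint32 count, then `count` float64 dose
# values, then `count` uint32 flat indices into the 125x125x125 dose grid
# (unravelled in C order below).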
y_slice = 70
#this needs to be made general
def calculate_x_indices():
x_bins = np.linspace(-26, 24, 126)
low = np.min(np.where(x_bins >= -2 ))
high = np.max(np.where(x_bins < 7 ))
print(x_bins[low], x_bins[high])
return (low, high)
def slice_indices(lower_edge, upper_edge, lower_bound, upper_bound, n_edges=126):
    # Generalization of calculate_x/z_indices; the n_edges default of 126 is an assumption.
    bins = np.linspace(lower_edge, upper_edge, n_edges)
    return (np.min(np.where(bins >= lower_bound)), np.max(np.where(bins < upper_bound)))
#this needs to be made general
def calculate_z_indices():
z_bins = np.linspace(-25, 25, 126)
low = np.min(np.where(z_bins >= -1 ))
high = np.max(np.where(z_bins < 4 ))
print(z_bins[low], z_bins[high])
return (low, high)
def main(show_spectrum=False):
#so, now have the correct indices to slice out the parameters
low_x, high_x = calculate_x_indices()
low_z, high_z = calculate_z_indices()
# Need the size of binary reads
read_count = calcsize(struct_format_uint32)
read_dose = calcsize(struct_format_doses)
read_indices = calcsize(struct_format_indices)
#these files are the files to read
files = glob.glob('data_files/beamlet*.dat')
g = sorted(files, key=numerical_match_first)
print(g)
#just iterate over the globbed files;
for dat_file in g:
print(dat_file)
number = numerical_match_first(dat_file)
outfile = "output_" + str(number) + ".dat"
#need to keep the number extension the same
print(dat_file, outfile)
with open(dat_file, 'rb') as f:
data = f.read(read_count)
count, = unpack(struct_format_uint32, data)
print(count)
list_doses = []
list_indices = []
for i in range(count):
data = f.read(read_dose)
s, = unpack(struct_format_doses, data)
list_doses.append(s)
#print(s)
for i in range(count):
data = f.read(read_indices)
s, = unpack(struct_format_indices, data)
list_indices.append(s)
#print(s)
dose_grid = np.zeros((125,125,125))
for dose, index in zip(list_doses, list_indices):
dose_grid[np.unravel_index(index, dims=(125,125,125))] = dose
# print("argmax = ", np.argmax(dose_grid), np.unravel_index(np.argmax(dose_grid), dose_grid.shape))
# plt.figure()
# dose_max_slice = np.max(dose_grid[:, y_slice, :])
# plt.imshow(dose_grid[:, y_slice, :] / dose_max_slice, cmap=plt.get_cmap('cool'))
# ax = plt.gca()
# ax.grid(False)
# plt.show()
sub_dose_grid = np.array(dose_grid[low_x:high_x+1, :, low_z:high_z+1])
print("shape: ", sub_dose_grid.shape)
print("# elements: ", len(np.nonzero(sub_dose_grid.ravel())[0]))
# print("elements: ", np.nonzero(sub_dose_grid.ravel()))
elements = len(np.nonzero(sub_dose_grid.ravel())[0])
non_zero_index = np.nonzero(sub_dose_grid.ravel())[0]
sub_dose_grid_ravel = sub_dose_grid.ravel()
with open(outfile, 'wb') as out_file:
out_file.write(pack('I', elements))
for index_value in non_zero_index:
out_file.write(pack('d', sub_dose_grid_ravel[index_value]))
for index_value in non_zero_index:
out_file.write(pack('I', index_value))
with open('test_file_ascii_1.dat', 'w') as ascii_file:
ascii_file.write(str(elements) + " ")
for index_value in non_zero_index:
ascii_file.write(str(sub_dose_grid_ravel[index_value]) + " ")
for index_value in non_zero_index:
ascii_file.write(str(index_value) + " ")
# with open('test_file_full_ascii_1.dat', 'w') as ascii_file:
# ascii_file.write(str(elements) + " ")
# for index_value in non_zero_index:
# ascii_file.write(str(sub_dose_grid_ravel[index_value]) + " ")
# for index_value in non_zero_index:
# ascii_file.write(str(index_value) + " ")
#print(125**3)
# print("converting to numpy array")
# doses = np.array(list_doses)
# print(np.mean(doses))
# print(np.std(doses))
# print("done calculation")
# print(np.unravel_index(list_indices, (125,125,125)))
    if show_spectrum is True:
        doses = np.array(list_doses)  # dose spectrum of the last file processed above
        plt.figure()
        ax = plt.subplot(111)
plt.hist(doses, bins=1000)
ax.set_yscale("log", nonposy='clip')
plt.show()
return 0
if __name__ == '__main__':
main(show_spectrum=False)
| apache-2.0 |
Achuth17/scikit-learn | examples/svm/plot_iris.py | 62 | 3251 | """
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
   more realistic high-dimensional problems.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
| bsd-3-clause |
rew4332/tensorflow | tensorflow/contrib/learn/python/learn/preprocessing/tests/categorical_test.py | 30 | 2249 | # encoding: utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Categorical tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.learn_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.preprocessing import categorical
class CategoricalTest(tf.test.TestCase):
"""Categorical tests."""
def testSingleCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=1)
x = cat_processor.fit_transform([["0"], [1], [float("nan")], ["C"], ["C"],
[1], ["0"], [np.nan], [3]])
self.assertAllEqual(list(x), [[2], [1], [0], [3], [3], [1], [2], [0], [0]])
def testSingleCategoricalProcessorPandasSingleDF(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
cat_processor = categorical.CategoricalProcessor()
data = pd.DataFrame({"Gender": ["Male", "Female", "Male"]})
x = list(cat_processor.fit_transform(data))
self.assertAllEqual(list(x), [[1], [2], [1]])
def testMultiCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=0,
share=False)
x = cat_processor.fit_transform([["0", "Male"], [1, "Female"], ["3", "Male"]
])
self.assertAllEqual(list(x), [[1, 1], [2, 2], [3, 1]])
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
OpenBookProjects/ipynb | 101notebook/ipython-rel-2.1.0-examples/Parallel Computing/wave2D/parallelwave-mpi.py | 4 | 6671 | #!/usr/bin/env python
"""
A simple python program of solving a 2D wave equation in parallel.
Domain partitioning and inter-processor communication
are done by an object of class MPIRectPartitioner2D
(which is a subclass of RectPartitioner2D and uses MPI via mpi4py)
An example of running the program is (8 processors, 4x2 partition,
400x100 grid cells)::
$ ipcluster start --engines=MPIExec -n 8 # start 8 engines with mpiexec
$ python parallelwave-mpi.py --grid 400 100 --partition 4 2
See also parallelwave.py, which runs the same program but communicates
between engines through IPython.parallel instead of MPI.
Authors
-------
* Xing Cai
* Min Ragan-Kelley
"""
import sys
import time
from numpy import exp, zeros, newaxis, sqrt
from IPython.external import argparse
from IPython.parallel import Client, Reference
def setup_partitioner(index, num_procs, gnum_cells, parts):
"""create a partitioner in the engine namespace"""
global partitioner
p = MPIRectPartitioner2D(my_id=index, num_procs=num_procs)
p.redim(global_num_cells=gnum_cells, num_parts=parts)
p.prepare_communication()
# put the partitioner into the global namespace:
partitioner=p
def setup_solver(*args, **kwargs):
"""create a WaveSolver in the engine namespace"""
global solver
solver = WaveSolver(*args, **kwargs)
def wave_saver(u, x, y, t):
"""save the wave log"""
global u_hist
global t_hist
t_hist.append(t)
u_hist.append(1.0*u)
# main program:
if __name__ == '__main__':
parser = argparse.ArgumentParser()
paa = parser.add_argument
paa('--grid', '-g',
type=int, nargs=2, default=[100,100], dest='grid',
help="Cells in the grid, e.g. --grid 100 200")
paa('--partition', '-p',
type=int, nargs=2, default=None,
help="Process partition grid, e.g. --partition 4 2 for 4x2")
paa('-c',
type=float, default=1.,
help="Wave speed (I think)")
paa('-Ly',
type=float, default=1.,
help="system size (in y)")
paa('-Lx',
type=float, default=1.,
help="system size (in x)")
paa('-t', '--tstop',
type=float, default=1.,
help="Time units to run")
paa('--profile',
type=unicode, default=u'default',
help="Specify the ipcluster profile for the client to connect to.")
paa('--save',
action='store_true',
help="Add this flag to save the time/wave history during the run.")
paa('--scalar',
action='store_true',
help="Also run with scalar interior implementation, to see vector speedup.")
ns = parser.parse_args()
# set up arguments
grid = ns.grid
partition = ns.partition
Lx = ns.Lx
Ly = ns.Ly
c = ns.c
tstop = ns.tstop
if ns.save:
user_action = wave_saver
else:
user_action = None
num_cells = 1.0*(grid[0]-1)*(grid[1]-1)
final_test = True
# create the Client
rc = Client(profile=ns.profile)
num_procs = len(rc.ids)
if partition is None:
partition = [1,num_procs]
assert partition[0]*partition[1] == num_procs, "can't map partition %s to %i engines"%(partition, num_procs)
view = rc[:]
print "Running %s system on %s processes until %f"%(grid, partition, tstop)
# functions defining initial/boundary/source conditions
def I(x,y):
from numpy import exp
return 1.5*exp(-100*((x-0.5)**2+(y-0.5)**2))
def f(x,y,t):
return 0.0
# from numpy import exp,sin
# return 10*exp(-(x - sin(100*t))**2)
def bc(x,y,t):
return 0.0
# initial imports, setup rank
view.execute('\n'.join([
"from mpi4py import MPI",
"import numpy",
"mpi = MPI.COMM_WORLD",
"my_id = MPI.COMM_WORLD.Get_rank()"]), block=True)
# initialize t_hist/u_hist for saving the state at each step (optional)
view['t_hist'] = []
view['u_hist'] = []
# set vector/scalar implementation details
impl = {}
impl['ic'] = 'vectorized'
impl['inner'] = 'scalar'
impl['bc'] = 'vectorized'
# execute some files so that the classes we need will be defined on the engines:
view.run('RectPartitioner.py')
view.run('wavesolver.py')
# setup remote partitioner
# note that Reference means that the argument passed to setup_partitioner will be the
# object named 'my_id' in the engine's namespace
view.apply_sync(setup_partitioner, Reference('my_id'), num_procs, grid, partition)
# wait for initial communication to complete
view.execute('mpi.barrier()')
# setup remote solvers
view.apply_sync(setup_solver, I,f,c,bc,Lx,Ly,partitioner=Reference('partitioner'), dt=0,implementation=impl)
# lambda for calling solver.solve:
_solve = lambda *args, **kwargs: solver.solve(*args, **kwargs)
if ns.scalar:
impl['inner'] = 'scalar'
# run first with element-wise Python operations for each cell
t0 = time.time()
ar = view.apply_async(_solve, tstop, dt=0, verbose=True, final_test=final_test, user_action=user_action)
if final_test:
# this sum is performed element-wise as results finish
s = sum(ar)
# the L2 norm (RMS) of the result:
norm = sqrt(s/num_cells)
else:
norm = -1
t1 = time.time()
print 'scalar inner-version, Wtime=%g, norm=%g'%(t1-t0, norm)
impl['inner'] = 'vectorized'
# setup new solvers
view.apply_sync(setup_solver, I,f,c,bc,Lx,Ly,partitioner=Reference('partitioner'), dt=0,implementation=impl)
view.execute('mpi.barrier()')
# run again with numpy vectorized inner-implementation
t0 = time.time()
ar = view.apply_async(_solve, tstop, dt=0, verbose=True, final_test=final_test, user_action=user_action)
if final_test:
# this sum is performed element-wise as results finish
s = sum(ar)
# the L2 norm (RMS) of the result:
norm = sqrt(s/num_cells)
else:
norm = -1
t1 = time.time()
print 'vector inner-version, Wtime=%g, norm=%g'%(t1-t0, norm)
# if ns.save is True, then u_hist stores the history of u as a list
    # If the partition scheme is Nx1, then u can be reconstructed via 'gather':
if ns.save and partition[-1] == 1:
import matplotlib.pyplot as plt
view.execute('u_last=u_hist[-1]')
# map mpi IDs to IPython IDs, which may not match
ranks = view['my_id']
targets = range(len(ranks))
for idx in range(len(ranks)):
targets[idx] = ranks.index(idx)
u_last = rc[targets].gather('u_last', block=True)
plt.pcolor(u_last)
plt.show()
| mit |
linebp/pandas | pandas/io/pytables.py | 1 | 161539 | """
High level interface to PyTables for reading and writing pandas data structures
to disk
"""
# pylint: disable-msg=E1101,W0613,W0603
from datetime import datetime, date
import time
import re
import copy
import itertools
import warnings
import os
from pandas.core.dtypes.common import (
is_list_like,
is_categorical_dtype,
is_timedelta64_dtype,
is_datetime64tz_dtype,
is_datetime64_dtype,
_ensure_object,
_ensure_int64,
_ensure_platform_int)
from pandas.core.dtypes.missing import array_equivalent
import numpy as np
from pandas import (Series, DataFrame, Panel, Panel4D, Index,
MultiIndex, Int64Index, isnull, concat,
SparseSeries, SparseDataFrame, PeriodIndex,
DatetimeIndex, TimedeltaIndex)
from pandas.core import config
from pandas.io.common import _stringify_path
from pandas.core.sparse.array import BlockIndex, IntIndex
from pandas.core.base import StringMixin
from pandas.io.formats.printing import adjoin, pprint_thing
from pandas.errors import PerformanceWarning
from pandas.core.common import _asarray_tuplesafe
from pandas.core.algorithms import match, unique
from pandas.core.categorical import Categorical, _factorize_from_iterables
from pandas.core.internals import (BlockManager, make_block,
_block2d_to_blocknd,
_factor_indexer, _block_shape)
from pandas.core.index import _ensure_index
from pandas import compat
from pandas.compat import u_safe as u, PY3, range, lrange, string_types, filter
from pandas.core.config import get_option
from pandas.core.computation.pytables import Expr, maybe_expression
from pandas._libs import tslib, algos, lib
from distutils.version import LooseVersion
# versioning attribute
_version = '0.15.2'
# encoding
# PY3 encoding if we don't specify
_default_encoding = 'UTF-8'
def _ensure_decoded(s):
""" if we have bytes, decode them to unicode """
if isinstance(s, np.bytes_):
s = s.decode('UTF-8')
return s
def _ensure_encoding(encoding):
# set the encoding if we need
if encoding is None:
if PY3:
encoding = _default_encoding
return encoding
def _ensure_str(name):
"""Ensure that an index / column name is a str (python 3) or
unicode (python 2); otherwise they may be np.string dtype.
Non-string dtypes are passed through unchanged.
https://github.com/pandas-dev/pandas/issues/13492
"""
if isinstance(name, compat.string_types):
name = compat.text_type(name)
return name
Term = Expr
def _ensure_term(where, scope_level):
"""
ensure that the where is a Term or a list of Term
this makes sure that we are capturing the scope of variables
that are passed
create the terms here with a frame_level=2 (we are 2 levels down)
"""
    # only consider list/tuple here as an ndarray is automatically a coordinate
# list
level = scope_level + 1
if isinstance(where, (list, tuple)):
wlist = []
for w in filter(lambda x: x is not None, where):
if not maybe_expression(w):
wlist.append(w)
else:
wlist.append(Term(w, scope_level=level))
where = wlist
elif maybe_expression(where):
where = Term(where, scope_level=level)
return where
class PossibleDataLossError(Exception):
pass
class ClosedFileError(Exception):
pass
class IncompatibilityWarning(Warning):
pass
incompatibility_doc = """
where criteria is being ignored as this version [%s] is too old (or
not-defined), read the file in and write it out to a new file to upgrade (with
the copy_to method)
"""
class AttributeConflictWarning(Warning):
pass
attribute_conflict_doc = """
the [%s] attribute of the existing index is [%s] which conflicts with the new
[%s], resetting the attribute to None
"""
class DuplicateWarning(Warning):
pass
duplicate_doc = """
duplicate entries in table, taking most recently appended
"""
performance_doc = """
your performance may suffer as PyTables will pickle object types that it cannot
map directly to c-types [inferred_type->%s,key->%s] [items->%s]
"""
# formats
_FORMAT_MAP = {
u('f'): 'fixed',
u('fixed'): 'fixed',
u('t'): 'table',
u('table'): 'table',
}
format_deprecate_doc = """
the table keyword has been deprecated
use the format='fixed(f)|table(t)' keyword instead
fixed(f) : specifies the Fixed format
and is the default for put operations
table(t) : specifies the Table format
and is the default for append operations
"""
# map object types
_TYPE_MAP = {
Series: u('series'),
SparseSeries: u('sparse_series'),
DataFrame: u('frame'),
SparseDataFrame: u('sparse_frame'),
Panel: u('wide'),
Panel4D: u('ndim'),
}
# storer class map
_STORER_MAP = {
u('Series'): 'LegacySeriesFixed',
u('DataFrame'): 'LegacyFrameFixed',
u('DataMatrix'): 'LegacyFrameFixed',
u('series'): 'SeriesFixed',
u('sparse_series'): 'SparseSeriesFixed',
u('frame'): 'FrameFixed',
u('sparse_frame'): 'SparseFrameFixed',
u('wide'): 'PanelFixed',
}
# table class map
_TABLE_MAP = {
u('generic_table'): 'GenericTable',
u('appendable_series'): 'AppendableSeriesTable',
u('appendable_multiseries'): 'AppendableMultiSeriesTable',
u('appendable_frame'): 'AppendableFrameTable',
u('appendable_multiframe'): 'AppendableMultiFrameTable',
u('appendable_panel'): 'AppendablePanelTable',
u('appendable_ndim'): 'AppendableNDimTable',
u('worm'): 'WORMTable',
u('legacy_frame'): 'LegacyFrameTable',
u('legacy_panel'): 'LegacyPanelTable',
}
# axes map
_AXES_MAP = {
DataFrame: [0],
Panel: [1, 2],
Panel4D: [1, 2, 3],
}
# register our configuration options
dropna_doc = """
: boolean
drop ALL nan rows when appending to a table
"""
format_doc = """
: format
default format writing format, if None, then
put will default to 'fixed' and append will default to 'table'
"""
with config.config_prefix('io.hdf'):
config.register_option('dropna_table', False, dropna_doc,
validator=config.is_bool)
config.register_option(
'default_format', None, format_doc,
validator=config.is_one_of_factory(['fixed', 'table', None])
)
# oh the troubles to reduce import time
_table_mod = None
_table_file_open_policy_is_strict = False
def _tables():
global _table_mod
global _table_file_open_policy_is_strict
if _table_mod is None:
import tables
_table_mod = tables
# version requirements
if LooseVersion(tables.__version__) < '3.0.0':
raise ImportError("PyTables version >= 3.0.0 is required")
# set the file open policy
# return the file open policy; this changes as of pytables 3.1
# depending on the HDF5 version
try:
_table_file_open_policy_is_strict = (
tables.file._FILE_OPEN_POLICY == 'strict')
except:
pass
return _table_mod
# interface to/from ###
def to_hdf(path_or_buf, key, value, mode=None, complevel=None, complib=None,
append=None, **kwargs):
""" store this object, close it if we opened it """
if append:
f = lambda store: store.append(key, value, **kwargs)
else:
f = lambda store: store.put(key, value, **kwargs)
path_or_buf = _stringify_path(path_or_buf)
if isinstance(path_or_buf, string_types):
with HDFStore(path_or_buf, mode=mode, complevel=complevel,
complib=complib) as store:
f(store)
else:
f(path_or_buf)
def read_hdf(path_or_buf, key=None, **kwargs):
""" read from the store, close it if we opened it
Retrieve pandas object stored in file, optionally based on where
criteria
Parameters
----------
path_or_buf : path (string), buffer, or path object (pathlib.Path or
py._path.local.LocalPath) to read from
.. versionadded:: 0.19.0 support for pathlib, py.path.
key : group identifier in the store. Can be omitted if the HDF file
contains a single pandas object.
    where : list of Term (or convertible) objects, optional
start : optional, integer (defaults to None), row number to start
selection
stop : optional, integer (defaults to None), row number to stop
selection
columns : optional, a list of columns that if not None, will limit the
return columns
iterator : optional, boolean, return an iterator, default False
chunksize : optional, nrows to include in iteration, return an iterator
Returns
-------
The selected object
"""
if kwargs.get('mode', 'a') not in ['r', 'r+', 'a']:
raise ValueError('mode {0} is not allowed while performing a read. '
'Allowed modes are r, r+ and a.'
.format(kwargs.get('mode')))
# grab the scope
if 'where' in kwargs:
kwargs['where'] = _ensure_term(kwargs['where'], scope_level=1)
if isinstance(path_or_buf, HDFStore):
if not path_or_buf.is_open:
raise IOError('The HDFStore must be open for reading.')
store = path_or_buf
auto_close = False
else:
path_or_buf = _stringify_path(path_or_buf)
if not isinstance(path_or_buf, string_types):
raise NotImplementedError('Support for generic buffers has not '
'been implemented.')
try:
exists = os.path.exists(path_or_buf)
# if filepath is too long
except (TypeError, ValueError):
exists = False
if not exists:
raise compat.FileNotFoundError(
'File %s does not exist' % path_or_buf)
store = HDFStore(path_or_buf, **kwargs)
# can't auto open/close if we are using an iterator
# so delegate to the iterator
auto_close = True
try:
if key is None:
groups = store.groups()
if len(groups) == 0:
raise ValueError('No dataset in HDF5 file.')
candidate_only_group = groups[0]
# For the HDF file to have only one dataset, all other groups
# should then be metadata groups for that candidate group. (This
# assumes that the groups() method enumerates parent groups
# before their children.)
for group_to_check in groups[1:]:
if not _is_metadata_of(group_to_check, candidate_only_group):
raise ValueError('key must be provided when HDF5 file '
'contains multiple datasets.')
key = candidate_only_group._v_pathname
return store.select(key, auto_close=auto_close, **kwargs)
except:
# if there is an error, close the store
try:
store.close()
except:
pass
raise
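# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for exposition, not part of the original
# module): a minimal to_hdf / read_hdf round trip based on the docstrings
# above. The file name 'store.h5', the key 'df' and the column 'A' are
# assumptions made purely for the example.
#
#   df.to_hdf('store.h5', 'df', format='table', data_columns=['A'])
#   subset = read_hdf('store.h5', 'df', where=['A > 0'], columns=['A'])
#
# Writing with format='table' is what makes the where/columns selection on
# the read side possible; the default 'fixed' format is not queryable.
# ---------------------------------------------------------------------------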
def _is_metadata_of(group, parent_group):
"""Check if a given group is a metadata group for a given parent_group."""
if group._v_depth <= parent_group._v_depth:
return False
current = group
while current._v_depth > 1:
parent = current._v_parent
if parent == parent_group and current._v_name == 'meta':
return True
current = current._v_parent
return False
class HDFStore(StringMixin):
"""
dict-like IO interface for storing pandas objects in PyTables
either Fixed or Table format.
Parameters
----------
path : string
File path to HDF5 file
mode : {'a', 'w', 'r', 'r+'}, default 'a'
``'r'``
Read-only; no data can be modified.
``'w'``
Write; a new file is created (an existing file with the same
name would be deleted).
``'a'``
Append; an existing file is opened for reading and writing,
and if the file does not exist it is created.
``'r+'``
It is similar to ``'a'``, but the file must already exist.
complevel : int, 0-9, default None
Specifies a compression level for data.
A value of 0 disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
Specifies the compression library to be used.
As of v0.20.2 these additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
Specifying a compression library which is not available issues
a ValueError.
fletcher32 : bool, default False
If applying compression use the fletcher32 checksum
Examples
--------
>>> from pandas import DataFrame
>>> from numpy.random import randn
>>> bar = DataFrame(randn(10, 4))
>>> store = HDFStore('test.h5')
>>> store['foo'] = bar # write to HDF5
>>> bar = store['foo'] # retrieve
>>> store.close()
"""
def __init__(self, path, mode=None, complevel=None, complib=None,
fletcher32=False, **kwargs):
try:
import tables # noqa
except ImportError as ex: # pragma: no cover
raise ImportError('HDFStore requires PyTables, "{ex}" problem '
'importing'.format(ex=str(ex)))
if complib is not None and complib not in tables.filters.all_complibs:
raise ValueError(
"complib only supports {libs} compression.".format(
libs=tables.filters.all_complibs))
if complib is None and complevel is not None:
complib = tables.filters.default_complib
self._path = _stringify_path(path)
if mode is None:
mode = 'a'
self._mode = mode
self._handle = None
self._complevel = complevel if complevel else 0
self._complib = complib
self._fletcher32 = fletcher32
self._filters = None
self.open(mode=mode, **kwargs)
def __fspath__(self):
return self._path
@property
def root(self):
""" return the root node """
self._check_if_open()
return self._handle.root
@property
def filename(self):
return self._path
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
self.put(key, value)
def __delitem__(self, key):
return self.remove(key)
def __getattr__(self, name):
""" allow attribute access to get stores """
try:
return self.get(name)
except:
pass
raise AttributeError("'%s' object has no attribute '%s'" %
(type(self).__name__, name))
def __contains__(self, key):
""" check for existance of this key
can match the exact pathname or the pathnm w/o the leading '/'
"""
node = self.get_node(key)
if node is not None:
name = node._v_pathname
if name == key or name[1:] == key:
return True
return False
def __len__(self):
return len(self.groups())
def __unicode__(self):
return '%s\nFile path: %s\n' % (type(self), pprint_thing(self._path))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def keys(self):
"""
Return a (potentially unordered) list of the keys corresponding to the
objects stored in the HDFStore. These are ABSOLUTE path-names (e.g.
        have the leading '/')
"""
return [n._v_pathname for n in self.groups()]
def __iter__(self):
return iter(self.keys())
def items(self):
"""
iterate on key->group
"""
for g in self.groups():
yield g._v_pathname, g
iteritems = items
def open(self, mode='a', **kwargs):
"""
Open the file in the specified mode
Parameters
----------
mode : {'a', 'w', 'r', 'r+'}, default 'a'
See HDFStore docstring or tables.open_file for info about modes
"""
tables = _tables()
if self._mode != mode:
# if we are changing a write mode to read, ok
if self._mode in ['a', 'w'] and mode in ['r', 'r+']:
pass
elif mode in ['w']:
# this would truncate, raise here
if self.is_open:
raise PossibleDataLossError(
"Re-opening the file [{0}] with mode [{1}] "
"will delete the current file!"
.format(self._path, self._mode)
)
self._mode = mode
# close and reopen the handle
if self.is_open:
self.close()
if self._complevel and self._complevel > 0:
self._filters = _tables().Filters(self._complevel, self._complib,
fletcher32=self._fletcher32)
try:
self._handle = tables.open_file(self._path, self._mode, **kwargs)
except (IOError) as e: # pragma: no cover
if 'can not be written' in str(e):
print('Opening %s in read-only mode' % self._path)
self._handle = tables.open_file(self._path, 'r', **kwargs)
else:
raise
except (ValueError) as e:
# trap PyTables >= 3.1 FILE_OPEN_POLICY exception
# to provide an updated message
if 'FILE_OPEN_POLICY' in str(e):
e = ValueError(
"PyTables [{version}] no longer supports opening multiple "
"files\n"
"even in read-only mode on this HDF5 version "
"[{hdf_version}]. You can accept this\n"
"and not open the same file multiple times at once,\n"
"upgrade the HDF5 version, or downgrade to PyTables 3.0.0 "
"which allows\n"
"files to be opened multiple times at once\n"
.format(version=tables.__version__,
hdf_version=tables.get_hdf5_version()))
raise e
except (Exception) as e:
            # trying to read from a non-existent file causes an error which
# is not part of IOError, make it one
if self._mode == 'r' and 'Unable to open/create file' in str(e):
raise IOError(str(e))
raise
def close(self):
"""
Close the PyTables file handle
"""
if self._handle is not None:
self._handle.close()
self._handle = None
@property
def is_open(self):
"""
return a boolean indicating whether the file is open
"""
if self._handle is None:
return False
return bool(self._handle.isopen)
def flush(self, fsync=False):
"""
Force all buffered modifications to be written to disk.
Parameters
----------
fsync : bool (default False)
call ``os.fsync()`` on the file handle to force writing to disk.
Notes
-----
Without ``fsync=True``, flushing may not guarantee that the OS writes
to disk. With fsync, the operation will block until the OS claims the
file has been written; however, other caching layers may still
interfere.
"""
if self._handle is not None:
self._handle.flush()
if fsync:
try:
os.fsync(self._handle.fileno())
except:
pass
def get(self, key):
"""
Retrieve pandas object stored in file
Parameters
----------
key : object
Returns
-------
obj : type of object stored in file
"""
group = self.get_node(key)
if group is None:
raise KeyError('No object named %s in the file' % key)
return self._read_group(group)
def select(self, key, where=None, start=None, stop=None, columns=None,
iterator=False, chunksize=None, auto_close=False, **kwargs):
"""
Retrieve pandas object stored in file, optionally based on where
criteria
Parameters
----------
key : object
        where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
columns : a list of columns that if not None, will limit the return
columns
iterator : boolean, return an iterator, default False
chunksize : nrows to include in iteration, return an iterator
auto_close : boolean, should automatically close the store when
finished, default is False
Returns
-------
The selected object
"""
group = self.get_node(key)
if group is None:
raise KeyError('No object named %s in the file' % key)
# create the storer and axes
where = _ensure_term(where, scope_level=1)
s = self._create_storer(group)
s.infer_axes()
# function to call on iteration
def func(_start, _stop, _where):
return s.read(start=_start, stop=_stop,
where=_where,
columns=columns, **kwargs)
# create the iterator
it = TableIterator(self, s, func, where=where, nrows=s.nrows,
start=start, stop=stop, iterator=iterator,
chunksize=chunksize, auto_close=auto_close)
return it.get_result()
def select_as_coordinates(
self, key, where=None, start=None, stop=None, **kwargs):
"""
return the selection as an Index
Parameters
----------
key : object
        where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
"""
where = _ensure_term(where, scope_level=1)
return self.get_storer(key).read_coordinates(where=where, start=start,
stop=stop, **kwargs)
def select_column(self, key, column, **kwargs):
"""
return a single column from the table. This is generally only useful to
select an indexable
Parameters
----------
key : object
column: the column of interest
Exceptions
----------
raises KeyError if the column is not found (or key is not a valid
store)
raises ValueError if the column can not be extracted individually (it
is part of a data block)
"""
return self.get_storer(key).read_column(column=column, **kwargs)
def select_as_multiple(self, keys, where=None, selector=None, columns=None,
start=None, stop=None, iterator=False,
chunksize=None, auto_close=False, **kwargs):
""" Retrieve pandas objects from multiple tables
Parameters
----------
keys : a list of the tables
selector : the table to apply the where criteria (defaults to keys[0]
if not supplied)
columns : the columns I want back
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
iterator : boolean, return an iterator, default False
chunksize : nrows to include in iteration, return an iterator
Exceptions
----------
raises KeyError if keys or selector is not found or keys is empty
raises TypeError if keys is not a list or tuple
raises ValueError if the tables are not ALL THE SAME DIMENSIONS
"""
# default to single select
where = _ensure_term(where, scope_level=1)
if isinstance(keys, (list, tuple)) and len(keys) == 1:
keys = keys[0]
if isinstance(keys, string_types):
return self.select(key=keys, where=where, columns=columns,
start=start, stop=stop, iterator=iterator,
chunksize=chunksize, **kwargs)
if not isinstance(keys, (list, tuple)):
raise TypeError("keys must be a list/tuple")
if not len(keys):
raise ValueError("keys must have a non-zero length")
if selector is None:
selector = keys[0]
# collect the tables
tbls = [self.get_storer(k) for k in keys]
s = self.get_storer(selector)
# validate rows
nrows = None
for t, k in itertools.chain([(s, selector)], zip(tbls, keys)):
if t is None:
raise KeyError("Invalid table [%s]" % k)
if not t.is_table:
raise TypeError(
"object [%s] is not a table, and cannot be used in all "
"select as multiple" % t.pathname
)
if nrows is None:
nrows = t.nrows
elif t.nrows != nrows:
raise ValueError(
"all tables must have exactly the same nrows!")
        # axis is the concatenation axis
axis = list(set([t.non_index_axes[0][0] for t in tbls]))[0]
def func(_start, _stop, _where):
# retrieve the objs, _where is always passed as a set of
# coordinates here
objs = [t.read(where=_where, columns=columns, start=_start,
stop=_stop, **kwargs) for t in tbls]
# concat and return
return concat(objs, axis=axis,
verify_integrity=False)._consolidate()
# create the iterator
it = TableIterator(self, s, func, where=where, nrows=nrows,
start=start, stop=stop, iterator=iterator,
chunksize=chunksize, auto_close=auto_close)
return it.get_result(coordinates=True)
def put(self, key, value, format=None, append=False, **kwargs):
"""
Store object in HDFStore
Parameters
----------
key : object
value : {Series, DataFrame, Panel}
format : 'fixed(f)|table(t)', default is 'fixed'
fixed(f) : Fixed format
Fast writing/reading. Not-appendable, nor searchable
table(t) : Table format
Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching
/ selecting subsets of the data
append : boolean, default False
This will force Table format, append the input data to the
existing.
data_columns : list of columns to create as data columns, or True to
use all columns. See
`here <http://pandas.pydata.org/pandas-docs/stable/io.html#query-via-data-columns>`__ # noqa
encoding : default None, provide an encoding for strings
dropna : boolean, default False, do not write an ALL nan row to
the store settable by the option 'io.hdf.dropna_table'
"""
if format is None:
format = get_option("io.hdf.default_format") or 'fixed'
kwargs = self._validate_format(format, kwargs)
self._write_to_group(key, value, append=append, **kwargs)
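    # Illustrative sketch (not part of the original source): the two formats
    # accepted by put, using an assumed open store handle and DataFrame df.
    #
    #   store.put('df_fixed', df)                    # fast, not queryable
    #   store.put('df_table', df, format='table',
    #             data_columns=True)                 # searchable / appendable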
def remove(self, key, where=None, start=None, stop=None):
"""
Remove pandas object partially by specifying the where condition
Parameters
----------
key : string
Node to remove or delete rows from
        where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
Returns
-------
number of rows removed (or None if not a Table)
Exceptions
----------
raises KeyError if key is not a valid store
"""
where = _ensure_term(where, scope_level=1)
try:
s = self.get_storer(key)
except:
if where is not None:
raise ValueError(
"trying to remove a node with a non-None where clause!")
# we are actually trying to remove a node (with children)
s = self.get_node(key)
if s is not None:
s._f_remove(recursive=True)
return None
if s is None:
raise KeyError('No object named %s in the file' % key)
# remove the node
if where is None and start is None and stop is None:
s.group._f_remove(recursive=True)
# delete from the table
else:
if not s.is_table:
raise ValueError(
'can only remove with where on objects written as tables')
return s.delete(where=where, start=start, stop=stop)
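    # Illustrative sketch (not part of the original source): removing a whole
    # node versus deleting rows from a table-format node; the keys and the
    # where expression are assumed for the example.
    #
    #   store.remove('df_fixed')                        # drop the node entirely
    #   store.remove('df_table', where=['index > 10'])  # row-wise, tables only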
def append(self, key, value, format=None, append=True, columns=None,
dropna=None, **kwargs):
"""
Append to Table in file. Node must already exist and be Table
format.
Parameters
----------
key : object
value : {Series, DataFrame, Panel, Panel4D}
format: 'table' is the default
table(t) : table format
Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching
/ selecting subsets of the data
append : boolean, default True, append the input data to the
existing
data_columns : list of columns, or True, default None
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See `here
<http://pandas.pydata.org/pandas-docs/stable/io.html#query-via-data-columns>`__.
min_itemsize : dict of columns that specify minimum string sizes
        nan_rep : string to use as string nan representation
chunksize : size to chunk the writing
expectedrows : expected TOTAL row size of this table
encoding : default None, provide an encoding for strings
dropna : boolean, default False, do not write an ALL nan row to
the store settable by the option 'io.hdf.dropna_table'
Notes
-----
Does *not* check if data being appended overlaps with existing
data in the table, so be careful
"""
if columns is not None:
raise TypeError("columns is not a supported keyword in append, "
"try data_columns")
if dropna is None:
dropna = get_option("io.hdf.dropna_table")
if format is None:
format = get_option("io.hdf.default_format") or 'table'
kwargs = self._validate_format(format, kwargs)
self._write_to_group(key, value, append=append, dropna=dropna,
**kwargs)
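    # Illustrative sketch (not part of the original source): appending two
    # assumed frames df1/df2 to one table, sizing the string column 'C' up
    # front so later rows with longer strings still fit.
    #
    #   store.append('df', df1, data_columns=['C'], min_itemsize={'C': 50})
    #   store.append('df', df2)   # must match the schema of the first append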
def append_to_multiple(self, d, value, selector, data_columns=None,
axes=None, dropna=False, **kwargs):
"""
Append to multiple tables
Parameters
----------
d : a dict of table_name to table_columns, None is acceptable as the
values of one node (this will get all the remaining columns)
value : a pandas object
selector : a string that designates the indexable table; all of its
columns will be designed as data_columns, unless data_columns is
passed, in which case these are used
data_columns : list of columns to create as data columns, or True to
use all columns
dropna : if evaluates to True, drop rows from all tables if any single
row in each table has all NaN. Default False.
Notes
-----
axes parameter is currently not accepted
"""
if axes is not None:
raise TypeError("axes is currently not accepted as a parameter to"
" append_to_multiple; you can create the "
"tables independently instead")
if not isinstance(d, dict):
raise ValueError(
"append_to_multiple must have a dictionary specified as the "
"way to split the value"
)
if selector not in d:
raise ValueError(
"append_to_multiple requires a selector that is in passed dict"
)
# figure out the splitting axis (the non_index_axis)
axis = list(set(range(value.ndim)) - set(_AXES_MAP[type(value)]))[0]
# figure out how to split the value
remain_key = None
remain_values = []
for k, v in d.items():
if v is None:
if remain_key is not None:
raise ValueError(
"append_to_multiple can only have one value in d that "
"is None"
)
remain_key = k
else:
remain_values.extend(v)
if remain_key is not None:
ordered = value.axes[axis]
ordd = ordered.difference(Index(remain_values))
ordd = sorted(ordered.get_indexer(ordd))
d[remain_key] = ordered.take(ordd)
# data_columns
if data_columns is None:
data_columns = d[selector]
# ensure rows are synchronized across the tables
if dropna:
idxs = (value[cols].dropna(how='all').index for cols in d.values())
valid_index = next(idxs)
for index in idxs:
valid_index = valid_index.intersection(index)
value = value.loc[valid_index]
# append
for k, v in d.items():
dc = data_columns if k == selector else None
# compute the val
val = value.reindex_axis(v, axis=axis)
self.append(k, val, data_columns=dc, **kwargs)
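    # Illustrative sketch (not part of the original source): splitting one
    # assumed frame across two tables and selecting them back in sync via
    # select_as_multiple defined above.
    #
    #   store.append_to_multiple({'df1_mt': ['A', 'B'], 'df2_mt': None},
    #                            df, selector='df1_mt')
    #   result = store.select_as_multiple(['df1_mt', 'df2_mt'],
    #                                     where=['A > 0'], selector='df1_mt')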
def create_table_index(self, key, **kwargs):
""" Create a pytables index on the table
Parameters
----------
key : object (the node to index)
Exceptions
----------
raises if the node is not a table
"""
# version requirements
_tables()
s = self.get_storer(key)
if s is None:
return
if not s.is_table:
raise TypeError(
"cannot create table index on a Fixed format store")
s.create_index(**kwargs)
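    # Illustrative sketch (not part of the original source): creating a full
    # PyTables index on a data column of an existing table-format node; the
    # key and keyword values are assumptions for the example.
    #
    #   store.create_table_index('df', columns=['C'], optlevel=9, kind='full')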
def groups(self):
"""return a list of all the top-level nodes (that are not themselves a
pandas storage object)
"""
_tables()
self._check_if_open()
return [
g for g in self._handle.walk_nodes()
if (getattr(g._v_attrs, 'pandas_type', None) or
getattr(g, 'table', None) or
(isinstance(g, _table_mod.table.Table) and
g._v_name != u('table')))
]
def get_node(self, key):
""" return the node with the key or None if it does not exist """
self._check_if_open()
try:
if not key.startswith('/'):
key = '/' + key
return self._handle.get_node(self.root, key)
except:
return None
def get_storer(self, key):
""" return the storer object for a key, raise if not in the file """
group = self.get_node(key)
if group is None:
return None
s = self._create_storer(group)
s.infer_axes()
return s
def copy(self, file, mode='w', propindexes=True, keys=None, complib=None,
complevel=None, fletcher32=False, overwrite=True):
""" copy the existing store to a new file, upgrading in place
Parameters
----------
propindexes: restore indexes in copied file (defaults to True)
keys : list of keys to include in the copy (defaults to all)
overwrite : overwrite (remove and replace) existing nodes in the
new store (default is True)
mode, complib, complevel, fletcher32 same as in HDFStore.__init__
Returns
-------
open file handle of the new store
"""
new_store = HDFStore(
file,
mode=mode,
complib=complib,
complevel=complevel,
fletcher32=fletcher32)
if keys is None:
keys = list(self.keys())
if not isinstance(keys, (tuple, list)):
keys = [keys]
for k in keys:
s = self.get_storer(k)
if s is not None:
if k in new_store:
if overwrite:
new_store.remove(k)
data = self.select(k)
if s.is_table:
index = False
if propindexes:
index = [a.name for a in s.axes if a.is_indexed]
new_store.append(
k, data, index=index,
data_columns=getattr(s, 'data_columns', None),
encoding=s.encoding
)
else:
new_store.put(k, data, encoding=s.encoding)
return new_store
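    # Illustrative sketch (not part of the original source): copying a store
    # into a new compressed file while keeping any data-column indexes; the
    # file name and compression settings are assumed.
    #
    #   compressed = store.copy('store_blosc.h5', complib='blosc',
    #                           complevel=9, propindexes=True)
    #   compressed.close()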
def info(self):
"""
print detailed information on the store
.. versionadded:: 0.21.0
"""
output = '%s\nFile path: %s\n' % (type(self), pprint_thing(self._path))
if self.is_open:
lkeys = sorted(list(self.keys()))
if len(lkeys):
keys = []
values = []
for k in lkeys:
try:
s = self.get_storer(k)
if s is not None:
keys.append(pprint_thing(s.pathname or k))
values.append(
pprint_thing(s or 'invalid_HDFStore node'))
except Exception as detail:
keys.append(k)
values.append("[invalid_HDFStore node: %s]"
% pprint_thing(detail))
output += adjoin(12, keys, values)
else:
output += 'Empty'
else:
output += "File is CLOSED"
return output
# private methods ######
def _check_if_open(self):
if not self.is_open:
raise ClosedFileError("{0} file is not open!".format(self._path))
def _validate_format(self, format, kwargs):
""" validate / deprecate formats; return the new kwargs """
kwargs = kwargs.copy()
# validate
try:
kwargs['format'] = _FORMAT_MAP[format.lower()]
except:
raise TypeError("invalid HDFStore format specified [{0}]"
.format(format))
return kwargs
def _create_storer(self, group, format=None, value=None, append=False,
**kwargs):
""" return a suitable class to operate """
def error(t):
raise TypeError(
"cannot properly create the storer for: [%s] [group->%s,"
"value->%s,format->%s,append->%s,kwargs->%s]"
% (t, group, type(value), format, append, kwargs)
)
pt = _ensure_decoded(getattr(group._v_attrs, 'pandas_type', None))
tt = _ensure_decoded(getattr(group._v_attrs, 'table_type', None))
# infer the pt from the passed value
if pt is None:
if value is None:
_tables()
if (getattr(group, 'table', None) or
isinstance(group, _table_mod.table.Table)):
pt = u('frame_table')
tt = u('generic_table')
else:
raise TypeError(
"cannot create a storer if the object is not existing "
"nor a value are passed")
else:
try:
pt = _TYPE_MAP[type(value)]
except:
error('_TYPE_MAP')
# we are actually a table
if format == 'table':
pt += u('_table')
# a storer node
if u('table') not in pt:
try:
return globals()[_STORER_MAP[pt]](self, group, **kwargs)
except:
error('_STORER_MAP')
# existing node (and must be a table)
if tt is None:
            # if we are a writer, determine the tt
if value is not None:
if pt == u('series_table'):
index = getattr(value, 'index', None)
if index is not None:
if index.nlevels == 1:
tt = u('appendable_series')
elif index.nlevels > 1:
tt = u('appendable_multiseries')
elif pt == u('frame_table'):
index = getattr(value, 'index', None)
if index is not None:
if index.nlevels == 1:
tt = u('appendable_frame')
elif index.nlevels > 1:
tt = u('appendable_multiframe')
elif pt == u('wide_table'):
tt = u('appendable_panel')
elif pt == u('ndim_table'):
tt = u('appendable_ndim')
else:
                # distinguish between a frame/table
tt = u('legacy_panel')
try:
fields = group.table._v_attrs.fields
if len(fields) == 1 and fields[0] == u('value'):
tt = u('legacy_frame')
except:
pass
try:
return globals()[_TABLE_MAP[tt]](self, group, **kwargs)
except:
error('_TABLE_MAP')
def _write_to_group(self, key, value, format, index=True, append=False,
complib=None, encoding=None, **kwargs):
group = self.get_node(key)
# remove the node if we are not appending
if group is not None and not append:
self._handle.remove_node(group, recursive=True)
group = None
        # we don't want to store a table node at all if our object is 0-len
        # as there are no dtypes
if getattr(value, 'empty', None) and (format == 'table' or append):
return
if group is None:
paths = key.split('/')
# recursively create the groups
path = '/'
for p in paths:
if not len(p):
continue
new_path = path
if not path.endswith('/'):
new_path += '/'
new_path += p
group = self.get_node(new_path)
if group is None:
group = self._handle.create_group(path, p)
path = new_path
s = self._create_storer(group, format, value, append=append,
encoding=encoding, **kwargs)
if append:
# raise if we are trying to append to a Fixed format,
# or a table that exists (and we are putting)
if (not s.is_table or
(s.is_table and format == 'fixed' and s.is_exists)):
raise ValueError('Can only append to Tables')
if not s.is_exists:
s.set_object_info()
else:
s.set_object_info()
if not s.is_table and complib:
raise ValueError(
'Compression not supported on Fixed format stores'
)
# write the object
s.write(obj=value, append=append, complib=complib, **kwargs)
if s.is_table and index:
s.create_index(columns=index)
def _read_group(self, group, **kwargs):
s = self._create_storer(group)
s.infer_axes()
return s.read(**kwargs)
def get_store(path, **kwargs):
""" Backwards compatible alias for ``HDFStore``
"""
warnings.warn(
"get_store is deprecated and be "
"removed in a future version\n"
"HDFStore(path, **kwargs) is the replacement",
FutureWarning,
stacklevel=6)
return HDFStore(path, **kwargs)
class TableIterator(object):
""" define the iteration interface on a table
Parameters
----------
store : the reference store
    s : the referred storer
func : the function to execute the query
where : the where of the query
nrows : the rows to iterate on
start : the passed start value (default is None)
stop : the passed stop value (default is None)
iterator : boolean, whether to use the default iterator
chunksize : the passed chunking value (default is 50000)
auto_close : boolean, automatically close the store at the end of
iteration, default is False
kwargs : the passed kwargs
"""
def __init__(self, store, s, func, where, nrows, start=None, stop=None,
iterator=False, chunksize=None, auto_close=False):
self.store = store
self.s = s
self.func = func
self.where = where
# set start/stop if they are not set if we are a table
if self.s.is_table:
if nrows is None:
nrows = 0
if start is None:
start = 0
if stop is None:
stop = nrows
stop = min(nrows, stop)
self.nrows = nrows
self.start = start
self.stop = stop
self.coordinates = None
if iterator or chunksize is not None:
if chunksize is None:
chunksize = 100000
self.chunksize = int(chunksize)
else:
self.chunksize = None
self.auto_close = auto_close
def __iter__(self):
# iterate
current = self.start
while current < self.stop:
stop = min(current + self.chunksize, self.stop)
value = self.func(None, None, self.coordinates[current:stop])
current = stop
if value is None or not len(value):
continue
yield value
self.close()
def close(self):
if self.auto_close:
self.store.close()
def get_result(self, coordinates=False):
# return the actual iterator
if self.chunksize is not None:
if not self.s.is_table:
raise TypeError(
"can only use an iterator or chunksize on a table")
self.coordinates = self.s.read_coordinates(where=self.where)
return self
        # if specified, read via coordinates (necessary for multiple selections)
if coordinates:
where = self.s.read_coordinates(where=self.where, start=self.start,
stop=self.stop)
else:
where = self.where
# directly return the result
results = self.func(self.start, self.stop, where)
self.close()
return results
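# Illustrative sketch (not part of the original source): TableIterator is what
# HDFStore.select returns when iterator=True or chunksize is given, so chunked
# reads look like the following (the key, chunk size and the per-chunk
# process() callback are assumptions for the example).
#
#   for chunk in store.select('df', chunksize=50000):
#       process(chunk)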
class IndexCol(StringMixin):
""" an index column description class
Parameters
----------
axis : axis which I reference
values : the ndarray like converted values
kind : a string description of this type
typ : the pytables type
pos : the position in the pytables
"""
is_an_indexable = True
is_data_indexable = True
_info_fields = ['freq', 'tz', 'index_name']
def __init__(self, values=None, kind=None, typ=None, cname=None,
itemsize=None, name=None, axis=None, kind_attr=None,
pos=None, freq=None, tz=None, index_name=None, **kwargs):
self.values = values
self.kind = kind
self.typ = typ
self.itemsize = itemsize
self.name = name
self.cname = cname
self.kind_attr = kind_attr
self.axis = axis
self.pos = pos
self.freq = freq
self.tz = tz
self.index_name = index_name
self.table = None
self.meta = None
self.metadata = None
if name is not None:
self.set_name(name, kind_attr)
if pos is not None:
self.set_pos(pos)
def set_name(self, name, kind_attr=None):
""" set the name of this indexer """
self.name = name
self.kind_attr = kind_attr or "%s_kind" % name
if self.cname is None:
self.cname = name
return self
def set_axis(self, axis):
""" set the axis over which I index """
self.axis = axis
return self
def set_pos(self, pos):
""" set the position of this column in the Table """
self.pos = pos
if pos is not None and self.typ is not None:
self.typ._v_pos = pos
return self
def set_table(self, table):
self.table = table
return self
def __unicode__(self):
temp = tuple(
map(pprint_thing,
(self.name,
self.cname,
self.axis,
self.pos,
self.kind)))
return "name->%s,cname->%s,axis->%s,pos->%s,kind->%s" % temp
def __eq__(self, other):
""" compare 2 col items """
return all([getattr(self, a, None) == getattr(other, a, None)
for a in ['name', 'cname', 'axis', 'pos']])
def __ne__(self, other):
return not self.__eq__(other)
@property
def is_indexed(self):
""" return whether I am an indexed column """
try:
return getattr(self.table.cols, self.cname).is_indexed
except:
            return False
def copy(self):
new_self = copy.copy(self)
return new_self
def infer(self, handler):
"""infer this column from the table: create and return a new object"""
table = handler.table
new_self = self.copy()
new_self.set_table(table)
new_self.get_attr()
new_self.read_metadata(handler)
return new_self
def convert(self, values, nan_rep, encoding):
""" set the values from this selection: take = take ownership """
# values is a recarray
if values.dtype.fields is not None:
values = values[self.cname]
values = _maybe_convert(values, self.kind, encoding)
kwargs = dict()
if self.freq is not None:
kwargs['freq'] = _ensure_decoded(self.freq)
if self.index_name is not None:
kwargs['name'] = _ensure_decoded(self.index_name)
try:
self.values = Index(values, **kwargs)
except:
# if the output freq is different that what we recorded,
# it should be None (see also 'doc example part 2')
if 'freq' in kwargs:
kwargs['freq'] = None
self.values = Index(values, **kwargs)
self.values = _set_tz(self.values, self.tz)
return self
def take_data(self):
""" return the values & release the memory """
self.values, values = None, self.values
return values
@property
def attrs(self):
return self.table._v_attrs
@property
def description(self):
return self.table.description
@property
def col(self):
""" return my current col description """
return getattr(self.description, self.cname, None)
@property
def cvalues(self):
""" return my cython values """
return self.values
def __iter__(self):
return iter(self.values)
def maybe_set_size(self, min_itemsize=None, **kwargs):
""" maybe set a string col itemsize:
            min_itemsize can be an integer or a dict with this column's name
            with an integer size """
if _ensure_decoded(self.kind) == u('string'):
if isinstance(min_itemsize, dict):
min_itemsize = min_itemsize.get(self.name)
if min_itemsize is not None and self.typ.itemsize < min_itemsize:
self.typ = _tables(
).StringCol(itemsize=min_itemsize, pos=self.pos)
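    # Illustrative note (not part of the original source): min_itemsize reaches
    # maybe_set_size from the write path, e.g. an assumed call such as
    #
    #   store.append('df', df, min_itemsize={'index': 30})
    #
    # which reserves a 30-byte StringCol for the stored index values.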
def validate(self, handler, append, **kwargs):
self.validate_names()
def validate_names(self):
pass
def validate_and_set(self, handler, append, **kwargs):
self.set_table(handler.table)
self.validate_col()
self.validate_attr(append)
self.validate_metadata(handler)
self.write_metadata(handler)
self.set_attr()
def validate_col(self, itemsize=None):
""" validate this column: return the compared against itemsize """
# validate this column for string truncation (or reset to the max size)
if _ensure_decoded(self.kind) == u('string'):
c = self.col
if c is not None:
if itemsize is None:
itemsize = self.itemsize
if c.itemsize < itemsize:
raise ValueError(
"Trying to store a string with len [%s] in [%s] "
"column but\nthis column has a limit of [%s]!\n"
"Consider using min_itemsize to preset the sizes on "
"these columns" % (itemsize, self.cname, c.itemsize))
return c.itemsize
return None
def validate_attr(self, append):
# check for backwards incompatibility
if append:
existing_kind = getattr(self.attrs, self.kind_attr, None)
if existing_kind is not None and existing_kind != self.kind:
raise TypeError("incompatible kind in col [%s - %s]" %
(existing_kind, self.kind))
def update_info(self, info):
""" set/update the info for this indexable with the key/value
if there is a conflict raise/warn as needed """
for key in self._info_fields:
value = getattr(self, key, None)
idx = _get_info(info, self.name)
existing_value = idx.get(key)
if key in idx and value is not None and existing_value != value:
# frequency/name just warn
if key in ['freq', 'index_name']:
ws = attribute_conflict_doc % (key, existing_value, value)
warnings.warn(ws, AttributeConflictWarning, stacklevel=6)
# reset
idx[key] = None
setattr(self, key, None)
else:
raise ValueError(
"invalid info for [%s] for [%s], existing_value [%s] "
"conflicts with new value [%s]"
% (self.name, key, existing_value, value))
else:
if value is not None or existing_value is not None:
idx[key] = value
return self
def set_info(self, info):
""" set my state from the passed info """
idx = info.get(self.name)
if idx is not None:
self.__dict__.update(idx)
def get_attr(self):
""" set the kind for this colummn """
self.kind = getattr(self.attrs, self.kind_attr, None)
def set_attr(self):
""" set the kind for this colummn """
setattr(self.attrs, self.kind_attr, self.kind)
def read_metadata(self, handler):
""" retrieve the metadata for this columns """
self.metadata = handler.read_metadata(self.cname)
def validate_metadata(self, handler):
""" validate that kind=category does not change the categories """
if self.meta == 'category':
new_metadata = self.metadata
cur_metadata = handler.read_metadata(self.cname)
if new_metadata is not None and cur_metadata is not None \
and not array_equivalent(new_metadata, cur_metadata):
raise ValueError("cannot append a categorical with "
"different categories to the existing")
def write_metadata(self, handler):
""" set the meta data """
if self.metadata is not None:
handler.write_metadata(self.cname, self.metadata)
class GenericIndexCol(IndexCol):
""" an index which is not represented in the data of the table """
@property
def is_indexed(self):
return False
def convert(self, values, nan_rep, encoding):
""" set the values from this selection: take = take ownership """
self.values = Int64Index(np.arange(self.table.nrows))
return self
def get_attr(self):
pass
def set_attr(self):
pass
class DataCol(IndexCol):
""" a data holding column, by definition this is not indexable
Parameters
----------
data : the actual data
cname : the column name in the table to hold the data (typically
values)
meta : a string description of the metadata
metadata : the actual metadata
"""
is_an_indexable = False
is_data_indexable = False
_info_fields = ['tz', 'ordered']
@classmethod
def create_for_block(
cls, i=None, name=None, cname=None, version=None, **kwargs):
""" return a new datacol with the block i """
if cname is None:
cname = name or 'values_block_%d' % i
if name is None:
name = cname
        # prior to 0.10.1, we named values blocks like: values_block_0 and the
# name values_0
try:
if version[0] == 0 and version[1] <= 10 and version[2] == 0:
m = re.search("values_block_(\d+)", name)
if m:
name = "values_%s" % m.groups()[0]
except:
pass
return cls(name=name, cname=cname, **kwargs)
def __init__(self, values=None, kind=None, typ=None,
cname=None, data=None, meta=None, metadata=None,
block=None, **kwargs):
super(DataCol, self).__init__(values=values, kind=kind, typ=typ,
cname=cname, **kwargs)
self.dtype = None
self.dtype_attr = u("%s_dtype" % self.name)
self.meta = meta
self.meta_attr = u("%s_meta" % self.name)
self.set_data(data)
self.set_metadata(metadata)
def __unicode__(self):
temp = tuple(
map(pprint_thing,
(self.name,
self.cname,
self.dtype,
self.kind,
self.shape)))
return "name->%s,cname->%s,dtype->%s,kind->%s,shape->%s" % temp
def __eq__(self, other):
""" compare 2 col items """
return all([getattr(self, a, None) == getattr(other, a, None)
for a in ['name', 'cname', 'dtype', 'pos']])
def set_data(self, data, dtype=None):
self.data = data
if data is not None:
if dtype is not None:
self.dtype = dtype
self.set_kind()
elif self.dtype is None:
self.dtype = data.dtype.name
self.set_kind()
def take_data(self):
""" return the data & release the memory """
self.data, data = None, self.data
return data
def set_metadata(self, metadata):
""" record the metadata """
if metadata is not None:
metadata = np.array(metadata, copy=False).ravel()
self.metadata = metadata
def set_kind(self):
# set my kind if we can
if self.dtype is not None:
dtype = _ensure_decoded(self.dtype)
if dtype.startswith(u('string')) or dtype.startswith(u('bytes')):
self.kind = 'string'
elif dtype.startswith(u('float')):
self.kind = 'float'
elif dtype.startswith(u('complex')):
self.kind = 'complex'
elif dtype.startswith(u('int')) or dtype.startswith(u('uint')):
self.kind = 'integer'
elif dtype.startswith(u('date')):
self.kind = 'datetime'
elif dtype.startswith(u('timedelta')):
self.kind = 'timedelta'
elif dtype.startswith(u('bool')):
self.kind = 'bool'
else:
raise AssertionError(
"cannot interpret dtype of [%s] in [%s]" % (dtype, self))
# set my typ if we need
if self.typ is None:
self.typ = getattr(self.description, self.cname, None)
def set_atom(self, block, block_items, existing_col, min_itemsize,
nan_rep, info, encoding=None, **kwargs):
""" create and setup my atom from the block b """
self.values = list(block_items)
# short-cut certain block types
if block.is_categorical:
return self.set_atom_categorical(block, items=block_items,
info=info)
elif block.is_datetimetz:
return self.set_atom_datetime64tz(block, info=info)
elif block.is_datetime:
return self.set_atom_datetime64(block)
elif block.is_timedelta:
return self.set_atom_timedelta64(block)
elif block.is_complex:
return self.set_atom_complex(block)
dtype = block.dtype.name
inferred_type = lib.infer_dtype(block.values)
if inferred_type == 'date':
raise TypeError(
"[date] is not implemented as a table column")
elif inferred_type == 'datetime':
# after 8260
            # this only would be hit for a multi-timezone dtype
# which is an error
raise TypeError(
"too many timezones in this block, create separate "
"data columns"
)
elif inferred_type == 'unicode':
raise TypeError(
"[unicode] is not implemented as a table column")
# this is basically a catchall; if say a datetime64 has nans then will
# end up here ###
elif inferred_type == 'string' or dtype == 'object':
self.set_atom_string(
block, block_items,
existing_col,
min_itemsize,
nan_rep,
encoding)
# set as a data block
else:
self.set_atom_data(block)
def get_atom_string(self, block, itemsize):
return _tables().StringCol(itemsize=itemsize, shape=block.shape[0])
def set_atom_string(self, block, block_items, existing_col, min_itemsize,
nan_rep, encoding):
# fill nan items with myself, don't disturb the blocks by
# trying to downcast
block = block.fillna(nan_rep, downcast=False)
if isinstance(block, list):
block = block[0]
data = block.values
# see if we have a valid string type
inferred_type = lib.infer_dtype(data.ravel())
if inferred_type != 'string':
# we cannot serialize this data, so report an exception on a column
# by column basis
for i, item in enumerate(block_items):
col = block.iget(i)
inferred_type = lib.infer_dtype(col.ravel())
if inferred_type != 'string':
raise TypeError(
"Cannot serialize the column [%s] because\n"
"its data contents are [%s] object dtype"
% (item, inferred_type)
)
# itemsize is the maximum length of a string (along any dimension)
data_converted = _convert_string_array(data, encoding)
itemsize = data_converted.itemsize
# specified min_itemsize?
if isinstance(min_itemsize, dict):
min_itemsize = int(min_itemsize.get(
self.name) or min_itemsize.get('values') or 0)
itemsize = max(min_itemsize or 0, itemsize)
# check for column in the values conflicts
if existing_col is not None:
eci = existing_col.validate_col(itemsize)
if eci > itemsize:
itemsize = eci
self.itemsize = itemsize
self.kind = 'string'
self.typ = self.get_atom_string(block, itemsize)
self.set_data(data_converted.astype('|S%d' % itemsize, copy=False))
def get_atom_coltype(self, kind=None):
""" return the PyTables column class for this column """
if kind is None:
kind = self.kind
if self.kind.startswith('uint'):
col_name = "UInt%sCol" % kind[4:]
else:
col_name = "%sCol" % kind.capitalize()
return getattr(_tables(), col_name)
def get_atom_data(self, block, kind=None):
return self.get_atom_coltype(kind=kind)(shape=block.shape[0])
def set_atom_complex(self, block):
self.kind = block.dtype.name
itemsize = int(self.kind.split('complex')[-1]) // 8
self.typ = _tables().ComplexCol(
itemsize=itemsize, shape=block.shape[0])
self.set_data(block.values.astype(self.typ.type, copy=False))
def set_atom_data(self, block):
self.kind = block.dtype.name
self.typ = self.get_atom_data(block)
self.set_data(block.values.astype(self.typ.type, copy=False))
def set_atom_categorical(self, block, items, info=None, values=None):
# currently only supports a 1-D categorical
# in a 1-D block
values = block.values
codes = values.codes
self.kind = 'integer'
self.dtype = codes.dtype.name
if values.ndim > 1:
raise NotImplementedError("only support 1-d categoricals")
if len(items) > 1:
raise NotImplementedError("only support single block categoricals")
# write the codes; must be in a block shape
self.ordered = values.ordered
self.typ = self.get_atom_data(block, kind=codes.dtype.name)
self.set_data(_block_shape(codes))
# write the categories
self.meta = 'category'
self.set_metadata(block.values.categories)
# update the info
self.update_info(info)
def get_atom_datetime64(self, block):
return _tables().Int64Col(shape=block.shape[0])
def set_atom_datetime64(self, block, values=None):
self.kind = 'datetime64'
self.typ = self.get_atom_datetime64(block)
if values is None:
values = block.values.view('i8')
self.set_data(values, 'datetime64')
def set_atom_datetime64tz(self, block, info, values=None):
if values is None:
values = block.values
# convert this column to i8 in UTC, and save the tz
values = values.asi8.reshape(block.shape)
# store a converted timezone
self.tz = _get_tz(block.values.tz)
self.update_info(info)
self.kind = 'datetime64'
self.typ = self.get_atom_datetime64(block)
self.set_data(values, 'datetime64')
def get_atom_timedelta64(self, block):
return _tables().Int64Col(shape=block.shape[0])
def set_atom_timedelta64(self, block, values=None):
self.kind = 'timedelta64'
self.typ = self.get_atom_timedelta64(block)
if values is None:
values = block.values.view('i8')
self.set_data(values, 'timedelta64')
@property
def shape(self):
return getattr(self.data, 'shape', None)
@property
def cvalues(self):
""" return my cython values """
return self.data
def validate_attr(self, append):
"""validate that we have the same order as the existing & same dtype"""
if append:
existing_fields = getattr(self.attrs, self.kind_attr, None)
if (existing_fields is not None and
existing_fields != list(self.values)):
raise ValueError("appended items do not match existing items"
" in table!")
existing_dtype = getattr(self.attrs, self.dtype_attr, None)
if (existing_dtype is not None and
existing_dtype != self.dtype):
raise ValueError("appended items dtype do not match existing "
"items dtype in table!")
def convert(self, values, nan_rep, encoding):
"""set the data from this selection (and convert to the correct dtype
if we can)
"""
# values is a recarray
if values.dtype.fields is not None:
values = values[self.cname]
self.set_data(values)
# use the meta if needed
meta = _ensure_decoded(self.meta)
# convert to the correct dtype
if self.dtype is not None:
dtype = _ensure_decoded(self.dtype)
# reverse converts
if dtype == u('datetime64'):
# recreate with tz if indicated
self.data = _set_tz(self.data, self.tz, coerce=True)
elif dtype == u('timedelta64'):
self.data = np.asarray(self.data, dtype='m8[ns]')
elif dtype == u('date'):
try:
self.data = np.asarray(
[date.fromordinal(v) for v in self.data], dtype=object)
except ValueError:
self.data = np.asarray(
[date.fromtimestamp(v) for v in self.data],
dtype=object)
elif dtype == u('datetime'):
self.data = np.asarray(
[datetime.fromtimestamp(v) for v in self.data],
dtype=object)
elif meta == u('category'):
# we have a categorical
categories = self.metadata
codes = self.data.ravel()
# if we have stored a NaN in the categories
# then strip it; in theory we could have BOTH
# -1s in the codes and nulls :<
mask = isnull(categories)
if mask.any():
categories = categories[~mask]
codes[codes != -1] -= mask.astype(int).cumsum().values
self.data = Categorical.from_codes(codes,
categories=categories,
ordered=self.ordered)
else:
try:
self.data = self.data.astype(dtype, copy=False)
except:
self.data = self.data.astype('O', copy=False)
# convert nans / decode
if _ensure_decoded(self.kind) == u('string'):
self.data = _unconvert_string_array(
self.data, nan_rep=nan_rep, encoding=encoding)
return self
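    # convert() is the read-side mirror of the set_atom_* writers: i8 data is
    # re-interpreted as datetime64 (re-localized via _set_tz) or timedelta64,
    # stored codes plus the 'category' metadata rebuild a Categorical, and
    # string columns are decoded with nan_rep values turned back into NaN.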
def get_attr(self):
""" get the data for this colummn """
self.values = getattr(self.attrs, self.kind_attr, None)
self.dtype = getattr(self.attrs, self.dtype_attr, None)
self.meta = getattr(self.attrs, self.meta_attr, None)
self.set_kind()
def set_attr(self):
""" set the data for this colummn """
setattr(self.attrs, self.kind_attr, self.values)
setattr(self.attrs, self.meta_attr, self.meta)
if self.dtype is not None:
setattr(self.attrs, self.dtype_attr, self.dtype)
class DataIndexableCol(DataCol):
""" represent a data column that can be indexed """
is_data_indexable = True
def validate_names(self):
if not Index(self.values).is_object():
raise ValueError("cannot have non-object label DataIndexableCol")
def get_atom_string(self, block, itemsize):
return _tables().StringCol(itemsize=itemsize)
def get_atom_data(self, block, kind=None):
return self.get_atom_coltype(kind=kind)()
def get_atom_datetime64(self, block):
return _tables().Int64Col()
def get_atom_timedelta64(self, block):
return _tables().Int64Col()
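    # Unlike DataCol, the atoms above are created without a shape: an
    # indexable (queryable) column is stored as a single 1-D table column
    # rather than an N-wide block, which is what allows PyTables to build a
    # searchable index on it.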
class GenericDataIndexableCol(DataIndexableCol):
""" represent a generic pytables data column """
def get_attr(self):
pass
class Fixed(StringMixin):
""" represent an object in my store
facilitate read/write of various types of objects
this is an abstract base class
Parameters
----------
parent : my parent HDFStore
group : the group node where the table resides
"""
pandas_kind = None
obj_type = None
ndim = None
is_table = False
def __init__(self, parent, group, encoding=None, **kwargs):
self.parent = parent
self.group = group
self.encoding = _ensure_encoding(encoding)
self.set_version()
@property
def is_old_version(self):
return (self.version[0] <= 0 and self.version[1] <= 10 and
self.version[2] < 1)
def set_version(self):
""" compute and set our version """
version = _ensure_decoded(
getattr(self.group._v_attrs, 'pandas_version', None))
try:
self.version = tuple([int(x) for x in version.split('.')])
if len(self.version) == 2:
self.version = self.version + (0,)
except:
self.version = (0, 0, 0)
@property
def pandas_type(self):
return _ensure_decoded(getattr(self.group._v_attrs,
'pandas_type', None))
@property
def format_type(self):
return 'fixed'
def __unicode__(self):
""" return a pretty representation of myself """
self.infer_axes()
s = self.shape
if s is not None:
if isinstance(s, (list, tuple)):
s = "[%s]" % ','.join([pprint_thing(x) for x in s])
return "%-12.12s (shape->%s)" % (self.pandas_type, s)
return self.pandas_type
def set_object_info(self):
""" set my pandas type & version """
self.attrs.pandas_type = str(self.pandas_kind)
self.attrs.pandas_version = str(_version)
self.set_version()
def copy(self):
new_self = copy.copy(self)
return new_self
@property
def storage_obj_type(self):
return self.obj_type
@property
def shape(self):
return self.nrows
@property
def pathname(self):
return self.group._v_pathname
@property
def _handle(self):
return self.parent._handle
@property
def _filters(self):
return self.parent._filters
@property
def _complevel(self):
return self.parent._complevel
@property
def _fletcher32(self):
return self.parent._fletcher32
@property
def _complib(self):
return self.parent._complib
@property
def attrs(self):
return self.group._v_attrs
def set_attrs(self):
""" set our object attributes """
pass
def get_attrs(self):
""" get our object attributes """
pass
@property
def storable(self):
""" return my storable """
return self.group
@property
def is_exists(self):
return False
@property
def nrows(self):
return getattr(self.storable, 'nrows', None)
def validate(self, other):
""" validate against an existing storable """
if other is None:
return
return True
def validate_version(self, where=None):
""" are we trying to operate on an old version? """
return True
def infer_axes(self):
""" infer the axes of my storer
return a boolean indicating if we have a valid storer or not """
s = self.storable
if s is None:
return False
self.get_attrs()
return True
def read(self, **kwargs):
raise NotImplementedError(
"cannot read on an abstract storer: subclasses should implement")
def write(self, **kwargs):
raise NotImplementedError(
"cannot write on an abstract storer: sublcasses should implement")
def delete(self, where=None, start=None, stop=None, **kwargs):
"""
support fully deleting the node in its entirety (only) - where
specification must be None
"""
if where is None and start is None and stop is None:
self._handle.remove_node(self.group, recursive=True)
return None
raise TypeError("cannot delete on an abstract storer")
class GenericFixed(Fixed):
""" a generified fixed version """
_index_type_map = {DatetimeIndex: 'datetime', PeriodIndex: 'period'}
_reverse_index_map = dict([(v, k)
for k, v in compat.iteritems(_index_type_map)])
attributes = []
    # indexer helpers
def _class_to_alias(self, cls):
return self._index_type_map.get(cls, '')
def _alias_to_class(self, alias):
if isinstance(alias, type): # pragma: no cover
# compat: for a short period of time master stored types
return alias
return self._reverse_index_map.get(alias, Index)
def _get_index_factory(self, klass):
if klass == DatetimeIndex:
def f(values, freq=None, tz=None):
return DatetimeIndex._simple_new(values, None, freq=freq,
tz=tz)
return f
elif klass == PeriodIndex:
def f(values, freq=None, tz=None):
return PeriodIndex._simple_new(values, None, freq=freq)
return f
return klass
def validate_read(self, kwargs):
"""
remove table keywords from kwargs and return
raise if any keywords are passed which are not-None
"""
kwargs = copy.copy(kwargs)
columns = kwargs.pop('columns', None)
if columns is not None:
raise TypeError("cannot pass a column specification when reading "
"a Fixed format store. this store must be "
"selected in its entirety")
where = kwargs.pop('where', None)
if where is not None:
raise TypeError("cannot pass a where specification when reading "
"from a Fixed format store. this store must be "
"selected in its entirety")
return kwargs
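    # For illustration (file name and key are hypothetical): reading with a
    # selection, e.g.
    #
    #   pd.read_hdf('store.h5', 'df', where='index > 5')
    #
    # hits the TypeError above when 'df' was written in fixed format; only
    # tables (format='table') support partial selection.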
@property
def is_exists(self):
return True
def set_attrs(self):
""" set our object attributes """
self.attrs.encoding = self.encoding
def get_attrs(self):
""" retrieve our attributes """
self.encoding = _ensure_encoding(getattr(self.attrs, 'encoding', None))
for n in self.attributes:
setattr(self, n, _ensure_decoded(getattr(self.attrs, n, None)))
def write(self, obj, **kwargs):
self.set_attrs()
def read_array(self, key, start=None, stop=None):
""" read an array for the specified node (off of group """
import tables
node = getattr(self.group, key)
data = node[start:stop]
attrs = node._v_attrs
transposed = getattr(attrs, 'transposed', False)
if isinstance(node, tables.VLArray):
ret = data[0]
else:
dtype = getattr(attrs, 'value_type', None)
shape = getattr(attrs, 'shape', None)
if shape is not None:
# length 0 axis
ret = np.empty(shape, dtype=dtype)
else:
ret = data
if dtype == u('datetime64'):
# reconstruct a timezone if indicated
ret = _set_tz(ret, getattr(attrs, 'tz', None), coerce=True)
elif dtype == u('timedelta64'):
ret = np.asarray(ret, dtype='m8[ns]')
if transposed:
return ret.T
else:
return ret
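    # read_array undoes the layout chosen by write_array: values stored via a
    # VLArray come back as a single object, empty arrays are rebuilt from the
    # saved shape/value_type attributes, datetime64/timedelta64 are recovered
    # from their i8 representation, and the transposed flag restores the
    # original orientation.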
def read_index(self, key, **kwargs):
variety = _ensure_decoded(getattr(self.attrs, '%s_variety' % key))
if variety == u('multi'):
return self.read_multi_index(key, **kwargs)
elif variety == u('block'):
return self.read_block_index(key, **kwargs)
elif variety == u('sparseint'):
return self.read_sparse_intindex(key, **kwargs)
elif variety == u('regular'):
_, index = self.read_index_node(getattr(self.group, key), **kwargs)
return index
else: # pragma: no cover
raise TypeError('unrecognized index variety: %s' % variety)
def write_index(self, key, index):
if isinstance(index, MultiIndex):
setattr(self.attrs, '%s_variety' % key, 'multi')
self.write_multi_index(key, index)
elif isinstance(index, BlockIndex):
setattr(self.attrs, '%s_variety' % key, 'block')
self.write_block_index(key, index)
elif isinstance(index, IntIndex):
setattr(self.attrs, '%s_variety' % key, 'sparseint')
self.write_sparse_intindex(key, index)
else:
setattr(self.attrs, '%s_variety' % key, 'regular')
converted = _convert_index(index, self.encoding,
self.format_type).set_name('index')
self.write_array(key, converted.values)
node = getattr(self.group, key)
node._v_attrs.kind = converted.kind
node._v_attrs.name = index.name
if isinstance(index, (DatetimeIndex, PeriodIndex)):
node._v_attrs.index_class = self._class_to_alias(type(index))
if hasattr(index, 'freq'):
node._v_attrs.freq = index.freq
if hasattr(index, 'tz') and index.tz is not None:
node._v_attrs.tz = _get_tz(index.tz)
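    # write_index records which reader to use via the '<key>_variety'
    # attribute ('multi', 'block', 'sparseint' or 'regular'); for regular
    # indexes the kind, name, freq, tz and index_class are stashed as node
    # attributes so read_index_node can rebuild the exact Index subclass.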
def write_block_index(self, key, index):
self.write_array('%s_blocs' % key, index.blocs)
self.write_array('%s_blengths' % key, index.blengths)
setattr(self.attrs, '%s_length' % key, index.length)
def read_block_index(self, key, **kwargs):
length = getattr(self.attrs, '%s_length' % key)
blocs = self.read_array('%s_blocs' % key, **kwargs)
blengths = self.read_array('%s_blengths' % key, **kwargs)
return BlockIndex(length, blocs, blengths)
def write_sparse_intindex(self, key, index):
self.write_array('%s_indices' % key, index.indices)
setattr(self.attrs, '%s_length' % key, index.length)
def read_sparse_intindex(self, key, **kwargs):
length = getattr(self.attrs, '%s_length' % key)
indices = self.read_array('%s_indices' % key, **kwargs)
return IntIndex(length, indices)
def write_multi_index(self, key, index):
setattr(self.attrs, '%s_nlevels' % key, index.nlevels)
for i, (lev, lab, name) in enumerate(zip(index.levels,
index.labels,
index.names)):
# write the level
level_key = '%s_level%d' % (key, i)
conv_level = _convert_index(lev, self.encoding,
self.format_type).set_name(level_key)
self.write_array(level_key, conv_level.values)
node = getattr(self.group, level_key)
node._v_attrs.kind = conv_level.kind
node._v_attrs.name = name
# write the name
setattr(node._v_attrs, '%s_name%d' % (key, i), name)
# write the labels
label_key = '%s_label%d' % (key, i)
self.write_array(label_key, lab)
def read_multi_index(self, key, **kwargs):
nlevels = getattr(self.attrs, '%s_nlevels' % key)
levels = []
labels = []
names = []
for i in range(nlevels):
level_key = '%s_level%d' % (key, i)
name, lev = self.read_index_node(getattr(self.group, level_key),
**kwargs)
levels.append(lev)
names.append(name)
label_key = '%s_label%d' % (key, i)
lab = self.read_array(label_key, **kwargs)
labels.append(lab)
return MultiIndex(levels=levels, labels=labels, names=names,
verify_integrity=True)
def read_index_node(self, node, start=None, stop=None):
data = node[start:stop]
# If the index was an empty array write_array_empty() will
        # have written a sentinel. Here we replace it with the original.
if ('shape' in node._v_attrs and
self._is_empty_array(getattr(node._v_attrs, 'shape'))):
data = np.empty(getattr(node._v_attrs, 'shape'),
dtype=getattr(node._v_attrs, 'value_type'))
kind = _ensure_decoded(node._v_attrs.kind)
name = None
if 'name' in node._v_attrs:
name = _ensure_str(node._v_attrs.name)
index_class = self._alias_to_class(getattr(node._v_attrs,
'index_class', ''))
factory = self._get_index_factory(index_class)
kwargs = {}
if u('freq') in node._v_attrs:
kwargs['freq'] = node._v_attrs['freq']
if u('tz') in node._v_attrs:
kwargs['tz'] = node._v_attrs['tz']
if kind in (u('date'), u('datetime')):
index = factory(_unconvert_index(data, kind,
encoding=self.encoding),
dtype=object, **kwargs)
else:
index = factory(_unconvert_index(data, kind,
encoding=self.encoding), **kwargs)
index.name = name
return name, index
def write_array_empty(self, key, value):
""" write a 0-len array """
# ugly hack for length 0 axes
arr = np.empty((1,) * value.ndim)
self._handle.create_array(self.group, key, arr)
getattr(self.group, key)._v_attrs.value_type = str(value.dtype)
getattr(self.group, key)._v_attrs.shape = value.shape
def _is_empty_array(self, shape):
"""Returns true if any axis is zero length."""
return any(x == 0 for x in shape)
def write_array(self, key, value, items=None):
if key in self.group:
self._handle.remove_node(self.group, key)
# Transform needed to interface with pytables row/col notation
empty_array = self._is_empty_array(value.shape)
transposed = False
if is_categorical_dtype(value):
raise NotImplementedError('Cannot store a category dtype in '
'a HDF5 dataset that uses format='
'"fixed". Use format="table".')
if not empty_array:
value = value.T
transposed = True
if self._filters is not None:
atom = None
try:
# get the atom for this datatype
atom = _tables().Atom.from_dtype(value.dtype)
except ValueError:
pass
if atom is not None:
# create an empty chunked array and fill it from value
if not empty_array:
ca = self._handle.create_carray(self.group, key, atom,
value.shape,
filters=self._filters)
ca[:] = value
getattr(self.group, key)._v_attrs.transposed = transposed
else:
self.write_array_empty(key, value)
return
if value.dtype.type == np.object_:
# infer the type, warn if we have a non-string type here (for
# performance)
inferred_type = lib.infer_dtype(value.ravel())
if empty_array:
pass
elif inferred_type == 'string':
pass
else:
try:
items = list(items)
except:
pass
ws = performance_doc % (inferred_type, key, items)
warnings.warn(ws, PerformanceWarning, stacklevel=7)
vlarr = self._handle.create_vlarray(self.group, key,
_tables().ObjectAtom())
vlarr.append(value)
else:
if empty_array:
self.write_array_empty(key, value)
else:
if is_datetime64_dtype(value.dtype):
self._handle.create_array(
self.group, key, value.view('i8'))
getattr(
self.group, key)._v_attrs.value_type = 'datetime64'
elif is_datetime64tz_dtype(value.dtype):
# store as UTC
# with a zone
self._handle.create_array(self.group, key,
value.asi8)
node = getattr(self.group, key)
node._v_attrs.tz = _get_tz(value.tz)
node._v_attrs.value_type = 'datetime64'
elif is_timedelta64_dtype(value.dtype):
self._handle.create_array(
self.group, key, value.view('i8'))
getattr(
self.group, key)._v_attrs.value_type = 'timedelta64'
else:
self._handle.create_array(self.group, key, value)
getattr(self.group, key)._v_attrs.transposed = transposed
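    # write_array strategy in short: when compression filters are active and a
    # native atom exists, the data goes into a chunked CArray; object dtypes
    # fall back to a pickled ObjectAtom VLArray (with a PerformanceWarning for
    # non-string content); datetime-like values are stored as their i8 view
    # plus a value_type marker so read_array can reverse the process.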
class LegacyFixed(GenericFixed):
def read_index_legacy(self, key, start=None, stop=None):
node = getattr(self.group, key)
data = node[start:stop]
kind = node._v_attrs.kind
return _unconvert_index_legacy(data, kind, encoding=self.encoding)
class LegacySeriesFixed(LegacyFixed):
def read(self, **kwargs):
kwargs = self.validate_read(kwargs)
index = self.read_index_legacy('index')
values = self.read_array('values')
return Series(values, index=index)
class LegacyFrameFixed(LegacyFixed):
def read(self, **kwargs):
kwargs = self.validate_read(kwargs)
index = self.read_index_legacy('index')
columns = self.read_index_legacy('columns')
values = self.read_array('values')
return DataFrame(values, index=index, columns=columns)
class SeriesFixed(GenericFixed):
pandas_kind = u('series')
attributes = ['name']
@property
def shape(self):
try:
return len(getattr(self.group, 'values')),
except:
return None
def read(self, **kwargs):
kwargs = self.validate_read(kwargs)
index = self.read_index('index', **kwargs)
values = self.read_array('values', **kwargs)
return Series(values, index=index, name=self.name)
def write(self, obj, **kwargs):
super(SeriesFixed, self).write(obj, **kwargs)
self.write_index('index', obj.index)
self.write_array('values', obj.values)
self.attrs.name = obj.name
class SparseFixed(GenericFixed):
def validate_read(self, kwargs):
"""
we don't support start, stop kwds in Sparse
"""
kwargs = super(SparseFixed, self).validate_read(kwargs)
if 'start' in kwargs or 'stop' in kwargs:
raise NotImplementedError("start and/or stop are not supported "
"in fixed Sparse reading")
return kwargs
class SparseSeriesFixed(SparseFixed):
pandas_kind = u('sparse_series')
attributes = ['name', 'fill_value', 'kind']
def read(self, **kwargs):
kwargs = self.validate_read(kwargs)
index = self.read_index('index')
sp_values = self.read_array('sp_values')
sp_index = self.read_index('sp_index')
return SparseSeries(sp_values, index=index, sparse_index=sp_index,
kind=self.kind or u('block'),
fill_value=self.fill_value,
name=self.name)
def write(self, obj, **kwargs):
super(SparseSeriesFixed, self).write(obj, **kwargs)
self.write_index('index', obj.index)
self.write_index('sp_index', obj.sp_index)
self.write_array('sp_values', obj.sp_values)
self.attrs.name = obj.name
self.attrs.fill_value = obj.fill_value
self.attrs.kind = obj.kind
class SparseFrameFixed(SparseFixed):
pandas_kind = u('sparse_frame')
attributes = ['default_kind', 'default_fill_value']
def read(self, **kwargs):
kwargs = self.validate_read(kwargs)
columns = self.read_index('columns')
sdict = {}
for c in columns:
key = 'sparse_series_%s' % c
s = SparseSeriesFixed(self.parent, getattr(self.group, key))
s.infer_axes()
sdict[c] = s.read()
return SparseDataFrame(sdict, columns=columns,
default_kind=self.default_kind,
default_fill_value=self.default_fill_value)
def write(self, obj, **kwargs):
""" write it as a collection of individual sparse series """
super(SparseFrameFixed, self).write(obj, **kwargs)
for name, ss in compat.iteritems(obj):
key = 'sparse_series_%s' % name
if key not in self.group._v_children:
node = self._handle.create_group(self.group, key)
else:
node = getattr(self.group, key)
s = SparseSeriesFixed(self.parent, node)
s.write(ss)
self.attrs.default_fill_value = obj.default_fill_value
self.attrs.default_kind = obj.default_kind
self.write_index('columns', obj.columns)
class BlockManagerFixed(GenericFixed):
attributes = ['ndim', 'nblocks']
is_shape_reversed = False
@property
def shape(self):
try:
ndim = self.ndim
# items
items = 0
for i in range(self.nblocks):
node = getattr(self.group, 'block%d_items' % i)
shape = getattr(node, 'shape', None)
if shape is not None:
items += shape[0]
# data shape
node = getattr(self.group, 'block0_values')
shape = getattr(node, 'shape', None)
if shape is not None:
shape = list(shape[0:(ndim - 1)])
else:
shape = []
shape.append(items)
# hacky - this works for frames, but is reversed for panels
if self.is_shape_reversed:
shape = shape[::-1]
return shape
except:
return None
def read(self, start=None, stop=None, **kwargs):
# start, stop applied to rows, so 0th axis only
kwargs = self.validate_read(kwargs)
select_axis = self.obj_type()._get_block_manager_axis(0)
axes = []
for i in range(self.ndim):
_start, _stop = (start, stop) if i == select_axis else (None, None)
ax = self.read_index('axis%d' % i, start=_start, stop=_stop)
axes.append(ax)
items = axes[0]
blocks = []
for i in range(self.nblocks):
blk_items = self.read_index('block%d_items' % i)
values = self.read_array('block%d_values' % i,
start=_start, stop=_stop)
blk = make_block(values,
placement=items.get_indexer(blk_items))
blocks.append(blk)
return self.obj_type(BlockManager(blocks, axes))
def write(self, obj, **kwargs):
super(BlockManagerFixed, self).write(obj, **kwargs)
data = obj._data
if not data.is_consolidated():
data = data.consolidate()
self.attrs.ndim = data.ndim
for i, ax in enumerate(data.axes):
if i == 0:
if not ax.is_unique:
raise ValueError(
"Columns index has to be unique for fixed format")
self.write_index('axis%d' % i, ax)
# Supporting mixed-type DataFrame objects...nontrivial
self.attrs.nblocks = len(data.blocks)
for i, blk in enumerate(data.blocks):
# I have no idea why, but writing values before items fixed #2299
blk_items = data.items.take(blk.mgr_locs)
self.write_array('block%d_values' % i, blk.values, items=blk_items)
self.write_index('block%d_items' % i, blk_items)
class FrameFixed(BlockManagerFixed):
pandas_kind = u('frame')
obj_type = DataFrame
class PanelFixed(BlockManagerFixed):
pandas_kind = u('wide')
obj_type = Panel
is_shape_reversed = True
def write(self, obj, **kwargs):
obj._consolidate_inplace()
return super(PanelFixed, self).write(obj, **kwargs)
class Table(Fixed):
""" represent a table:
facilitate read/write of various types of tables
Attrs in Table Node
-------------------
        These are attributes that are stored in the main table node; they are
necessary to recreate these tables when read back in.
index_axes : a list of tuples of the (original indexing axis and
index column)
non_index_axes: a list of tuples of the (original index axis and
columns on a non-indexing axis)
values_axes : a list of the columns which comprise the data of this
table
data_columns : a list of the columns that we are allowing indexing
(these become single columns in values_axes), or True to force all
columns
nan_rep : the string to use for nan representations for string
objects
levels : the names of levels
metadata : the names of the metadata columns
"""
pandas_kind = u('wide_table')
table_type = None
levels = 1
is_table = True
is_shape_reversed = False
def __init__(self, *args, **kwargs):
super(Table, self).__init__(*args, **kwargs)
self.index_axes = []
self.non_index_axes = []
self.values_axes = []
self.data_columns = []
self.metadata = []
self.info = dict()
self.nan_rep = None
self.selection = None
@property
def table_type_short(self):
return self.table_type.split('_')[0]
@property
def format_type(self):
return 'table'
def __unicode__(self):
""" return a pretty representatgion of myself """
self.infer_axes()
dc = ",dc->[%s]" % ','.join(
self.data_columns) if len(self.data_columns) else ''
ver = ''
if self.is_old_version:
ver = "[%s]" % '.'.join([str(x) for x in self.version])
return "%-12.12s%s (typ->%s,nrows->%s,ncols->%s,indexers->[%s]%s)" % (
self.pandas_type, ver, self.table_type_short, self.nrows,
self.ncols, ','.join([a.name for a in self.index_axes]), dc
)
def __getitem__(self, c):
""" return the axis for c """
for a in self.axes:
if c == a.name:
return a
return None
def validate(self, other):
""" validate against an existing table """
if other is None:
return
if other.table_type != self.table_type:
raise TypeError("incompatible table_type with existing [%s - %s]" %
(other.table_type, self.table_type))
for c in ['index_axes', 'non_index_axes', 'values_axes']:
sv = getattr(self, c, None)
ov = getattr(other, c, None)
if sv != ov:
# show the error for the specific axes
for i, sax in enumerate(sv):
oax = ov[i]
if sax != oax:
raise ValueError(
"invalid combinate of [%s] on appending data [%s] "
"vs current table [%s]" % (c, sax, oax))
# should never get here
raise Exception(
"invalid combinate of [%s] on appending data [%s] vs "
"current table [%s]" % (c, sv, ov))
@property
def is_multi_index(self):
"""the levels attribute is 1 or a list in the case of a multi-index"""
return isinstance(self.levels, list)
def validate_metadata(self, existing):
""" create / validate metadata """
self.metadata = [
c.name for c in self.values_axes if c.metadata is not None]
def validate_multiindex(self, obj):
"""validate that we can store the multi-index; reset and return the
new object
"""
levels = [l if l is not None else "level_{0}".format(i)
for i, l in enumerate(obj.index.names)]
try:
return obj.reset_index(), levels
except ValueError:
raise ValueError("duplicate names/columns in the multi-index when "
"storing as a table")
@property
def nrows_expected(self):
""" based on our axes, compute the expected nrows """
return np.prod([i.cvalues.shape[0] for i in self.index_axes])
@property
def is_exists(self):
""" has this table been created """
return u('table') in self.group
@property
def storable(self):
return getattr(self.group, 'table', None)
@property
def table(self):
""" return the table group (this is my storable) """
return self.storable
@property
def dtype(self):
return self.table.dtype
@property
def description(self):
return self.table.description
@property
def axes(self):
return itertools.chain(self.index_axes, self.values_axes)
@property
def ncols(self):
""" the number of total columns in the values axes """
return sum([len(a.values) for a in self.values_axes])
@property
def is_transposed(self):
return False
@property
def data_orientation(self):
"""return a tuple of my permutated axes, non_indexable at the front"""
return tuple(itertools.chain([int(a[0]) for a in self.non_index_axes],
[int(a.axis) for a in self.index_axes]))
def queryables(self):
""" return a dict of the kinds allowable columns for this object """
# compute the values_axes queryables
return dict(
[(a.cname, a) for a in self.index_axes] +
[(self.storage_obj_type._AXIS_NAMES[axis], None)
for axis, values in self.non_index_axes] +
[(v.cname, v) for v in self.values_axes
if v.name in set(self.data_columns)]
)
def index_cols(self):
""" return a list of my index cols """
return [(i.axis, i.cname) for i in self.index_axes]
def values_cols(self):
""" return a list of my values cols """
return [i.cname for i in self.values_axes]
def _get_metadata_path(self, key):
""" return the metadata pathname for this key """
return "{group}/meta/{key}/meta".format(group=self.group._v_pathname,
key=key)
def write_metadata(self, key, values):
"""
        write out a metadata array to the key as a table-format Series
Parameters
----------
key : string
values : ndarray
"""
values = Series(values)
self.parent.put(self._get_metadata_path(key), values, format='table',
encoding=self.encoding, nan_rep=self.nan_rep)
def read_metadata(self, key):
""" return the meta data array for this key """
if getattr(getattr(self.group, 'meta', None), key, None) is not None:
return self.parent.select(self._get_metadata_path(key))
return None
def set_info(self):
""" update our table index info """
self.attrs.info = self.info
def set_attrs(self):
""" set our table type & indexables """
self.attrs.table_type = str(self.table_type)
self.attrs.index_cols = self.index_cols()
self.attrs.values_cols = self.values_cols()
self.attrs.non_index_axes = self.non_index_axes
self.attrs.data_columns = self.data_columns
self.attrs.nan_rep = self.nan_rep
self.attrs.encoding = self.encoding
self.attrs.levels = self.levels
self.attrs.metadata = self.metadata
self.set_info()
def get_attrs(self):
""" retrieve our attributes """
self.non_index_axes = getattr(
self.attrs, 'non_index_axes', None) or []
self.data_columns = getattr(
self.attrs, 'data_columns', None) or []
self.info = getattr(
self.attrs, 'info', None) or dict()
self.nan_rep = getattr(self.attrs, 'nan_rep', None)
self.encoding = _ensure_encoding(
getattr(self.attrs, 'encoding', None))
self.levels = getattr(
self.attrs, 'levels', None) or []
self.index_axes = [
a.infer(self) for a in self.indexables if a.is_an_indexable
]
self.values_axes = [
a.infer(self) for a in self.indexables if not a.is_an_indexable
]
self.metadata = getattr(
self.attrs, 'metadata', None) or []
def validate_version(self, where=None):
""" are we trying to operate on an old version? """
if where is not None:
if (self.version[0] <= 0 and self.version[1] <= 10 and
self.version[2] < 1):
ws = incompatibility_doc % '.'.join(
[str(x) for x in self.version])
warnings.warn(ws, IncompatibilityWarning)
def validate_min_itemsize(self, min_itemsize):
"""validate the min_itemisze doesn't contain items that are not in the
axes this needs data_columns to be defined
"""
if min_itemsize is None:
return
if not isinstance(min_itemsize, dict):
return
q = self.queryables()
for k, v in min_itemsize.items():
# ok, apply generally
if k == 'values':
continue
if k not in q:
raise ValueError(
"min_itemsize has the key [%s] which is not an axis or "
"data_column" % k)
@property
def indexables(self):
""" create/cache the indexables if they don't exist """
if self._indexables is None:
self._indexables = []
# index columns
self._indexables.extend([
IndexCol(name=name, axis=axis, pos=i)
for i, (axis, name) in enumerate(self.attrs.index_cols)
])
# values columns
dc = set(self.data_columns)
base_pos = len(self._indexables)
def f(i, c):
klass = DataCol
if c in dc:
klass = DataIndexableCol
return klass.create_for_block(i=i, name=c, pos=base_pos + i,
version=self.version)
self._indexables.extend(
[f(i, c) for i, c in enumerate(self.attrs.values_cols)])
return self._indexables
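    # The indexables list is built lazily, once, from the attributes written
    # by set_attrs(): attrs.index_cols become IndexCols and attrs.values_cols
    # become DataCols (or DataIndexableCols when they were declared as
    # data_columns and can therefore be queried).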
def create_index(self, columns=None, optlevel=None, kind=None):
"""
Create a pytables index on the specified columns
note: cannot index Time64Col() or ComplexCol currently;
PyTables must be >= 3.0
Parameters
----------
columns : False (don't create an index), True (create all columns
index), None or list_like (the indexers to index)
optlevel: optimization level (defaults to 6)
kind : kind of index (defaults to 'medium')
Exceptions
----------
raises if the node is not a table
"""
if not self.infer_axes():
return
if columns is False:
return
# index all indexables and data_columns
if columns is None or columns is True:
columns = [a.cname for a in self.axes if a.is_data_indexable]
if not isinstance(columns, (tuple, list)):
columns = [columns]
kw = dict()
if optlevel is not None:
kw['optlevel'] = optlevel
if kind is not None:
kw['kind'] = kind
table = self.table
for c in columns:
v = getattr(table.cols, c, None)
if v is not None:
# remove the index if the kind/optlevel have changed
if v.is_indexed:
index = v.index
cur_optlevel = index.optlevel
cur_kind = index.kind
if kind is not None and cur_kind != kind:
v.remove_index()
else:
kw['kind'] = cur_kind
if optlevel is not None and cur_optlevel != optlevel:
v.remove_index()
else:
kw['optlevel'] = cur_optlevel
# create the index
if not v.is_indexed:
if v.type.startswith('complex'):
raise TypeError(
'Columns containing complex values can be stored '
'but cannot'
' be indexed when using table format. Either use '
'fixed format, set index=False, or do not include '
'the columns containing complex values to '
'data_columns when initializing the table.')
v.create_index(**kw)
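    # This path is typically reached through HDFStore.create_table_index; note
    # that an existing PyTables index is dropped and recreated whenever the
    # requested kind/optlevel differ from the current ones, and complex-valued
    # columns are rejected outright.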
def read_axes(self, where, **kwargs):
"""create and return the axes sniffed from the table: return boolean
for success
"""
# validate the version
self.validate_version(where)
# infer the data kind
if not self.infer_axes():
return False
# create the selection
self.selection = Selection(self, where=where, **kwargs)
values = self.selection.select()
# convert the data
for a in self.axes:
a.set_info(self.info)
a.convert(values, nan_rep=self.nan_rep, encoding=self.encoding)
return True
def get_object(self, obj):
""" return the data for this obj """
return obj
def validate_data_columns(self, data_columns, min_itemsize):
"""take the input data_columns and min_itemize and create a data
columns spec
"""
if not len(self.non_index_axes):
return []
axis, axis_labels = self.non_index_axes[0]
info = self.info.get(axis, dict())
if info.get('type') == 'MultiIndex' and data_columns:
raise ValueError("cannot use a multi-index on axis [{0}] with "
"data_columns {1}".format(axis, data_columns))
# evaluate the passed data_columns, True == use all columns
        # take only valid axis labels
if data_columns is True:
data_columns = list(axis_labels)
elif data_columns is None:
data_columns = []
# if min_itemsize is a dict, add the keys (exclude 'values')
if isinstance(min_itemsize, dict):
existing_data_columns = set(data_columns)
data_columns.extend([
k for k in min_itemsize.keys()
if k != 'values' and k not in existing_data_columns
])
# return valid columns in the order of our axis
return [c for c in data_columns if c in axis_labels]
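    # For example, data_columns=True expands to every label on the non-index
    # axis, keys of a min_itemsize dict (other than 'values') are added
    # implicitly, and the result is returned in axis order so the table layout
    # stays deterministic.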
def create_axes(self, axes, obj, validate=True, nan_rep=None,
data_columns=None, min_itemsize=None, **kwargs):
""" create and return the axes
        legacy tables create an indexable column, indexable index,
non-indexable fields
Parameters:
-----------
axes: a list of the axes in order to create (names or numbers of
the axes)
obj : the object to create axes on
validate: validate the obj against an existing object already
written
min_itemsize: a dict of the min size for a column in bytes
        nan_rep : a value to use for string column nan_rep
encoding : the encoding for string values
data_columns : a list of columns that we want to create separate to
allow indexing (or True will force all columns)
"""
# set the default axes if needed
if axes is None:
try:
axes = _AXES_MAP[type(obj)]
except:
raise TypeError("cannot properly create the storer for: "
"[group->%s,value->%s]"
% (self.group._v_name, type(obj)))
# map axes to numbers
axes = [obj._get_axis_number(a) for a in axes]
# do we have an existing table (if so, use its axes & data_columns)
if self.infer_axes():
existing_table = self.copy()
existing_table.infer_axes()
axes = [a.axis for a in existing_table.index_axes]
data_columns = existing_table.data_columns
nan_rep = existing_table.nan_rep
self.encoding = existing_table.encoding
self.info = copy.copy(existing_table.info)
else:
existing_table = None
        # currently only support ndim-1 axes
if len(axes) != self.ndim - 1:
raise ValueError(
"currently only support ndim-1 indexers in an AppendableTable")
# create according to the new data
self.non_index_axes = []
self.data_columns = []
# nan_representation
if nan_rep is None:
nan_rep = 'nan'
self.nan_rep = nan_rep
# create axes to index and non_index
index_axes_map = dict()
for i, a in enumerate(obj.axes):
if i in axes:
name = obj._AXIS_NAMES[i]
index_axes_map[i] = _convert_index(
a, self.encoding, self.format_type
).set_name(name).set_axis(i)
else:
# we might be able to change the axes on the appending data if
# necessary
append_axis = list(a)
if existing_table is not None:
indexer = len(self.non_index_axes)
exist_axis = existing_table.non_index_axes[indexer][1]
if not array_equivalent(np.array(append_axis),
np.array(exist_axis)):
# ahah! -> reindex
if array_equivalent(np.array(sorted(append_axis)),
np.array(sorted(exist_axis))):
append_axis = exist_axis
# the non_index_axes info
info = _get_info(self.info, i)
info['names'] = list(a.names)
info['type'] = a.__class__.__name__
self.non_index_axes.append((i, append_axis))
# set axis positions (based on the axes)
self.index_axes = [
index_axes_map[a].set_pos(j).update_info(self.info)
for j, a in enumerate(axes)
]
j = len(self.index_axes)
# check for column conflicts
for a in self.axes:
a.maybe_set_size(min_itemsize=min_itemsize)
# reindex by our non_index_axes & compute data_columns
for a in self.non_index_axes:
obj = _reindex_axis(obj, a[0], a[1])
def get_blk_items(mgr, blocks):
return [mgr.items.take(blk.mgr_locs) for blk in blocks]
# figure out data_columns and get out blocks
block_obj = self.get_object(obj)._consolidate()
blocks = block_obj._data.blocks
blk_items = get_blk_items(block_obj._data, blocks)
if len(self.non_index_axes):
axis, axis_labels = self.non_index_axes[0]
data_columns = self.validate_data_columns(
data_columns, min_itemsize)
if len(data_columns):
mgr = block_obj.reindex_axis(
Index(axis_labels).difference(Index(data_columns)),
axis=axis
)._data
blocks = list(mgr.blocks)
blk_items = get_blk_items(mgr, blocks)
for c in data_columns:
mgr = block_obj.reindex_axis([c], axis=axis)._data
blocks.extend(mgr.blocks)
blk_items.extend(get_blk_items(mgr, mgr.blocks))
# reorder the blocks in the same order as the existing_table if we can
if existing_table is not None:
by_items = dict([(tuple(b_items.tolist()), (b, b_items))
for b, b_items in zip(blocks, blk_items)])
new_blocks = []
new_blk_items = []
for ea in existing_table.values_axes:
items = tuple(ea.values)
try:
b, b_items = by_items.pop(items)
new_blocks.append(b)
new_blk_items.append(b_items)
except:
raise ValueError(
"cannot match existing table structure for [%s] on "
"appending data" % ','.join(pprint_thing(item) for
item in items))
blocks = new_blocks
blk_items = new_blk_items
# add my values
self.values_axes = []
for i, (b, b_items) in enumerate(zip(blocks, blk_items)):
# shape of the data column are the indexable axes
klass = DataCol
name = None
# we have a data_column
if (data_columns and len(b_items) == 1 and
b_items[0] in data_columns):
klass = DataIndexableCol
name = b_items[0]
self.data_columns.append(name)
# make sure that we match up the existing columns
# if we have an existing table
if existing_table is not None and validate:
try:
existing_col = existing_table.values_axes[i]
except:
raise ValueError("Incompatible appended table [%s] with "
"existing table [%s]"
% (blocks, existing_table.values_axes))
else:
existing_col = None
try:
col = klass.create_for_block(
i=i, name=name, version=self.version)
col.set_atom(block=b, block_items=b_items,
existing_col=existing_col,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
encoding=self.encoding,
info=self.info,
**kwargs)
col.set_pos(j)
self.values_axes.append(col)
except (NotImplementedError, ValueError, TypeError) as e:
raise e
except Exception as detail:
raise Exception(
"cannot find the correct atom type -> "
"[dtype->%s,items->%s] %s"
% (b.dtype.name, b_items, str(detail))
)
j += 1
# validate our min_itemsize
self.validate_min_itemsize(min_itemsize)
# validate our metadata
self.validate_metadata(existing_table)
# validate the axes if we have an existing table
if validate:
self.validate(existing_table)
def process_axes(self, obj, columns=None):
""" process axes filters """
# make a copy to avoid side effects
if columns is not None:
columns = list(columns)
# make sure to include levels if we have them
if columns is not None and self.is_multi_index:
for n in self.levels:
if n not in columns:
columns.insert(0, n)
# reorder by any non_index_axes & limit to the select columns
for axis, labels in self.non_index_axes:
obj = _reindex_axis(obj, axis, labels, columns)
# apply the selection filters (but keep in the same order)
if self.selection.filter is not None:
for field, op, filt in self.selection.filter.format():
def process_filter(field, filt):
for axis_name in obj._AXIS_NAMES.values():
axis_number = obj._get_axis_number(axis_name)
axis_values = obj._get_axis(axis_name)
# see if the field is the name of an axis
if field == axis_name:
# if we have a multi-index, then need to include
# the levels
if self.is_multi_index:
filt = filt.union(Index(self.levels))
takers = op(axis_values, filt)
return obj.loc._getitem_axis(takers,
axis=axis_number)
                        # this might be the name of a field IN an axis
elif field in axis_values:
# we need to filter on this dimension
values = _ensure_index(getattr(obj, field).values)
filt = _ensure_index(filt)
# hack until we support reversed dim flags
if isinstance(obj, DataFrame):
axis_number = 1 - axis_number
takers = op(values, filt)
return obj.loc._getitem_axis(takers,
axis=axis_number)
raise ValueError(
"cannot find the field [%s] for filtering!" % field)
obj = process_filter(field, filt)
return obj
def create_description(self, complib=None, complevel=None,
fletcher32=False, expectedrows=None):
""" create the description of the table from the axes & values """
        # provide expectedrows if it is passed
if expectedrows is None:
expectedrows = max(self.nrows_expected, 10000)
d = dict(name='table', expectedrows=expectedrows)
# description from the axes & values
d['description'] = dict([(a.cname, a.typ) for a in self.axes])
if complib:
if complevel is None:
complevel = self._complevel or 9
filters = _tables().Filters(
complevel=complevel, complib=complib,
fletcher32=fletcher32 or self._fletcher32)
d['filters'] = filters
elif self._filters is not None:
d['filters'] = self._filters
return d
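    # A hedged example of the effect: passing complib='blosc', complevel=5
    # when writing a table attaches tables.Filters(complevel=5,
    # complib='blosc') to the created Table, while expectedrows (defaulting to
    # at least 10000) lets PyTables pick a sensible chunk shape.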
def read_coordinates(self, where=None, start=None, stop=None, **kwargs):
"""select coordinates (row numbers) from a table; return the
coordinates object
"""
# validate the version
self.validate_version(where)
# infer the data kind
if not self.infer_axes():
return False
# create the selection
self.selection = Selection(
self, where=where, start=start, stop=stop, **kwargs)
coords = self.selection.select_coords()
if self.selection.filter is not None:
for field, op, filt in self.selection.filter.format():
data = self.read_column(
field, start=coords.min(), stop=coords.max() + 1)
coords = coords[
op(data.iloc[coords - coords.min()], filt).values]
return Index(coords)
def read_column(self, column, where=None, start=None, stop=None, **kwargs):
"""return a single column from the table, generally only indexables
are interesting
"""
# validate the version
self.validate_version()
# infer the data kind
if not self.infer_axes():
return False
if where is not None:
raise TypeError("read_column does not currently accept a where "
"clause")
# find the axes
for a in self.axes:
if column == a.name:
if not a.is_data_indexable:
raise ValueError(
"column [%s] can not be extracted individually; it is "
"not data indexable" % column)
# column must be an indexable or a data column
c = getattr(self.table.cols, column)
a.set_info(self.info)
return Series(_set_tz(a.convert(c[start:stop],
nan_rep=self.nan_rep,
encoding=self.encoding
).take_data(),
a.tz, True), name=column)
raise KeyError("column [%s] not found in the table" % column)
class WORMTable(Table):
""" a write-once read-many table: this format DOES NOT ALLOW appending to a
        table. writing is a one-time operation; the data are stored in a format
that allows for searching the data on disk
"""
table_type = u('worm')
def read(self, **kwargs):
""" read the indicies and the indexing array, calculate offset rows and
return """
raise NotImplementedError("WORMTable needs to implement read")
def write(self, **kwargs):
""" write in a format that we can search later on (but cannot append
        to): write out the indices and the values using _write_array
        (e.g. a CArray); create an indexing table so that we can search
"""
raise NotImplementedError("WORKTable needs to implement write")
class LegacyTable(Table):
""" an appendable table: allow append/query/delete operations to a
        (possibly) already existing appendable table; this table ALLOWS
        append (but doesn't require it), and stores the data in a format
that can be easily searched
"""
_indexables = [
IndexCol(name='index', axis=1, pos=0),
IndexCol(name='column', axis=2, pos=1, index_kind='columns_kind'),
DataCol(name='fields', cname='values', kind_attr='fields', pos=2)
]
table_type = u('legacy')
ndim = 3
def write(self, **kwargs):
raise TypeError("write operations are not allowed on legacy tables!")
def read(self, where=None, columns=None, **kwargs):
"""we have n indexable columns, with an arbitrary number of data
axes
"""
if not self.read_axes(where=where, **kwargs):
return None
lst_vals = [a.values for a in self.index_axes]
labels, levels = _factorize_from_iterables(lst_vals)
# labels and levels are tuples but lists are expected
labels = list(labels)
levels = list(levels)
N = [len(lvl) for lvl in levels]
# compute the key
key = _factor_indexer(N[1:], labels)
objs = []
if len(unique(key)) == len(key):
sorter, _ = algos.groupsort_indexer(
_ensure_int64(key), np.prod(N))
sorter = _ensure_platform_int(sorter)
# create the objs
for c in self.values_axes:
# the data need to be sorted
sorted_values = c.take_data().take(sorter, axis=0)
if sorted_values.ndim == 1:
sorted_values = sorted_values.reshape(
(sorted_values.shape[0], 1))
take_labels = [l.take(sorter) for l in labels]
items = Index(c.values)
block = _block2d_to_blocknd(
values=sorted_values, placement=np.arange(len(items)),
shape=tuple(N), labels=take_labels, ref_items=items)
# create the object
mgr = BlockManager([block], [items] + levels)
obj = self.obj_type(mgr)
# permute if needed
if self.is_transposed:
obj = obj.transpose(
*tuple(Series(self.data_orientation).argsort()))
objs.append(obj)
else:
warnings.warn(duplicate_doc, DuplicateWarning, stacklevel=5)
# reconstruct
long_index = MultiIndex.from_arrays(
[i.values for i in self.index_axes])
for c in self.values_axes:
lp = DataFrame(c.data, index=long_index, columns=c.values)
# need a better algorithm
tuple_index = long_index.values
unique_tuples = lib.fast_unique(tuple_index)
unique_tuples = _asarray_tuplesafe(unique_tuples)
indexer = match(unique_tuples, tuple_index)
indexer = _ensure_platform_int(indexer)
new_index = long_index.take(indexer)
new_values = lp.values.take(indexer, axis=0)
lp = DataFrame(new_values, index=new_index, columns=lp.columns)
objs.append(lp.to_panel())
# create the composite object
if len(objs) == 1:
wp = objs[0]
else:
wp = concat(objs, axis=0, verify_integrity=False)._consolidate()
# apply the selection filters & axis orderings
wp = self.process_axes(wp, columns=columns)
return wp
class LegacyFrameTable(LegacyTable):
""" support the legacy frame table """
pandas_kind = u('frame_table')
table_type = u('legacy_frame')
obj_type = Panel
def read(self, *args, **kwargs):
return super(LegacyFrameTable, self).read(*args, **kwargs)['value']
class LegacyPanelTable(LegacyTable):
""" support the legacy panel table """
table_type = u('legacy_panel')
obj_type = Panel
class AppendableTable(LegacyTable):
""" suppor the new appendable table formats """
_indexables = None
table_type = u('appendable')
def write(self, obj, axes=None, append=False, complib=None,
complevel=None, fletcher32=None, min_itemsize=None,
chunksize=None, expectedrows=None, dropna=False, **kwargs):
if not append and self.is_exists:
self._handle.remove_node(self.group, 'table')
# create the axes
self.create_axes(axes=axes, obj=obj, validate=append,
min_itemsize=min_itemsize,
**kwargs)
for a in self.axes:
a.validate(self, append)
if not self.is_exists:
# create the table
options = self.create_description(complib=complib,
complevel=complevel,
fletcher32=fletcher32,
expectedrows=expectedrows)
# set the table attributes
self.set_attrs()
# create the table
self._handle.create_table(self.group, **options)
else:
pass
# table = self.table
# update my info
self.set_info()
# validate the axes and set the kinds
for a in self.axes:
a.validate_and_set(self, append)
# add the rows
self.write_data(chunksize, dropna=dropna)
def write_data(self, chunksize, dropna=False):
""" we form the data into a 2-d including indexes,values,mask
write chunk-by-chunk """
names = self.dtype.names
nrows = self.nrows_expected
# if dropna==True, then drop ALL nan rows
masks = []
if dropna:
for a in self.values_axes:
# figure the mask: only do if we can successfully process this
# column, otherwise ignore the mask
mask = isnull(a.data).all(axis=0)
if isinstance(mask, np.ndarray):
masks.append(mask.astype('u1', copy=False))
# consolidate masks
if len(masks):
mask = masks[0]
for m in masks[1:]:
mask = mask & m
mask = mask.ravel()
else:
mask = None
# broadcast the indexes if needed
indexes = [a.cvalues for a in self.index_axes]
nindexes = len(indexes)
bindexes = []
for i, idx in enumerate(indexes):
# broadcast to all other indexes except myself
if i > 0 and i < nindexes:
repeater = np.prod(
[indexes[bi].shape[0] for bi in range(0, i)])
idx = np.tile(idx, repeater)
if i < nindexes - 1:
repeater = np.prod([indexes[bi].shape[0]
for bi in range(i + 1, nindexes)])
idx = np.repeat(idx, repeater)
bindexes.append(idx)
# transpose the values so first dimension is last
# reshape the values if needed
values = [a.take_data() for a in self.values_axes]
values = [v.transpose(np.roll(np.arange(v.ndim), v.ndim - 1))
for v in values]
bvalues = []
for i, v in enumerate(values):
new_shape = (nrows,) + self.dtype[names[nindexes + i]].shape
bvalues.append(values[i].reshape(new_shape))
# write the chunks
if chunksize is None:
chunksize = 100000
rows = np.empty(min(chunksize, nrows), dtype=self.dtype)
chunks = int(nrows / chunksize) + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
self.write_data_chunk(
rows,
indexes=[a[start_i:end_i] for a in bindexes],
mask=mask[start_i:end_i] if mask is not None else None,
values=[v[start_i:end_i] for v in bvalues])
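    # write_data flattens everything to the row-wise layout PyTables expects:
    # index values are tiled/repeated so each row carries its full coordinate,
    # all-NaN rows are optionally masked out when dropna=True, and rows are
    # appended in chunks (default chunksize 100000) to bound memory use.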
def write_data_chunk(self, rows, indexes, mask, values):
"""
Parameters
----------
rows : an empty memory space where we are putting the chunk
indexes : an array of the indexes
mask : an array of the masks
values : an array of the values
"""
# 0 len
for v in values:
if not np.prod(v.shape):
return
try:
nrows = indexes[0].shape[0]
if nrows != len(rows):
rows = np.empty(nrows, dtype=self.dtype)
names = self.dtype.names
nindexes = len(indexes)
# indexes
for i, idx in enumerate(indexes):
rows[names[i]] = idx
# values
for i, v in enumerate(values):
rows[names[i + nindexes]] = v
# mask
if mask is not None:
m = ~mask.ravel().astype(bool, copy=False)
if not m.all():
rows = rows[m]
except Exception as detail:
raise Exception("cannot create row-data -> %s" % detail)
try:
if len(rows):
self.table.append(rows)
self.table.flush()
except Exception as detail:
raise TypeError("tables cannot write this data -> %s" % detail)
def delete(self, where=None, start=None, stop=None, **kwargs):
# delete all rows (and return the nrows)
if where is None or not len(where):
if start is None and stop is None:
nrows = self.nrows
self._handle.remove_node(self.group, recursive=True)
else:
# pytables<3.0 would remove a single row with stop=None
if stop is None:
stop = self.nrows
nrows = self.table.remove_rows(start=start, stop=stop)
self.table.flush()
return nrows
# infer the data kind
if not self.infer_axes():
return None
# create the selection
table = self.table
self.selection = Selection(
self, where, start=start, stop=stop, **kwargs)
values = self.selection.select_coords()
# delete the rows in reverse order
l = Series(values).sort_values()
ln = len(l)
if ln:
# construct groups of consecutive rows
diff = l.diff()
groups = list(diff[diff > 1].index)
# 1 group
if not len(groups):
groups = [0]
# final element
if groups[-1] != ln:
groups.append(ln)
# initial element
if groups[0] != 0:
groups.insert(0, 0)
# we must remove in reverse order!
pg = groups.pop()
for g in reversed(groups):
rows = l.take(lrange(g, pg))
table.remove_rows(start=rows[rows.index[0]
], stop=rows[rows.index[-1]] + 1)
pg = g
self.table.flush()
# return the number of rows removed
return ln
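    # Row deletion works on sorted coordinates grouped into consecutive runs
    # and removes them back-to-front, so earlier row numbers remain valid
    # while later ranges are being removed.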
class AppendableFrameTable(AppendableTable):
""" suppor the new appendable table formats """
pandas_kind = u('frame_table')
table_type = u('appendable_frame')
ndim = 2
obj_type = DataFrame
@property
def is_transposed(self):
return self.index_axes[0].axis == 1
def get_object(self, obj):
""" these are written transposed """
if self.is_transposed:
obj = obj.T
return obj
def read(self, where=None, columns=None, **kwargs):
if not self.read_axes(where=where, **kwargs):
return None
info = (self.info.get(self.non_index_axes[0][0], dict())
if len(self.non_index_axes) else dict())
index = self.index_axes[0].values
frames = []
for a in self.values_axes:
# we could have a multi-index constructor here
# _ensure_index doesn't recognized our list-of-tuples here
if info.get('type') == 'MultiIndex':
cols = MultiIndex.from_tuples(a.values)
else:
cols = Index(a.values)
names = info.get('names')
if names is not None:
cols.set_names(names, inplace=True)
if self.is_transposed:
values = a.cvalues
index_ = cols
cols_ = Index(index, name=getattr(index, 'name', None))
else:
values = a.cvalues.T
index_ = Index(index, name=getattr(index, 'name', None))
cols_ = cols
# if we have a DataIndexableCol, its shape will only be 1 dim
if values.ndim == 1 and isinstance(values, np.ndarray):
values = values.reshape((1, values.shape[0]))
block = make_block(values, placement=np.arange(len(cols_)))
mgr = BlockManager([block], [cols_, index_])
frames.append(DataFrame(mgr))
if len(frames) == 1:
df = frames[0]
else:
df = concat(frames, axis=1)
# apply the selection filters & axis orderings
df = self.process_axes(df, columns=columns)
return df
class AppendableSeriesTable(AppendableFrameTable):
""" support the new appendable table formats """
pandas_kind = u('series_table')
table_type = u('appendable_series')
ndim = 2
obj_type = Series
storage_obj_type = DataFrame
@property
def is_transposed(self):
return False
def get_object(self, obj):
return obj
def write(self, obj, data_columns=None, **kwargs):
""" we are going to write this as a frame table """
if not isinstance(obj, DataFrame):
name = obj.name or 'values'
obj = DataFrame({name: obj}, index=obj.index)
obj.columns = [name]
return super(AppendableSeriesTable, self).write(
obj=obj, data_columns=obj.columns.tolist(), **kwargs)
def read(self, columns=None, **kwargs):
is_multi_index = self.is_multi_index
if columns is not None and is_multi_index:
for n in self.levels:
if n not in columns:
columns.insert(0, n)
s = super(AppendableSeriesTable, self).read(columns=columns, **kwargs)
if is_multi_index:
s.set_index(self.levels, inplace=True)
s = s.iloc[:, 0]
# remove the default name
if s.name == 'values':
s.name = None
return s
class AppendableMultiSeriesTable(AppendableSeriesTable):
""" support the new appendable table formats """
pandas_kind = u('series_table')
table_type = u('appendable_multiseries')
def write(self, obj, **kwargs):
""" we are going to write this as a frame table """
name = obj.name or 'values'
obj, self.levels = self.validate_multiindex(obj)
cols = list(self.levels)
cols.append(name)
obj.columns = cols
return super(AppendableMultiSeriesTable, self).write(obj=obj, **kwargs)
class GenericTable(AppendableFrameTable):
""" a table that read/writes the generic pytables table format """
pandas_kind = u('frame_table')
table_type = u('generic_table')
ndim = 2
obj_type = DataFrame
@property
def pandas_type(self):
return self.pandas_kind
@property
def storable(self):
return getattr(self.group, 'table', None) or self.group
def get_attrs(self):
""" retrieve our attributes """
self.non_index_axes = []
self.nan_rep = None
self.levels = []
self.index_axes = [a.infer(self)
for a in self.indexables if a.is_an_indexable]
self.values_axes = [a.infer(self)
for a in self.indexables if not a.is_an_indexable]
self.data_columns = [a.name for a in self.values_axes]
@property
def indexables(self):
""" create the indexables from the table description """
if self._indexables is None:
d = self.description
            # the index column is just a simple index
self._indexables = [GenericIndexCol(name='index', axis=0)]
for i, n in enumerate(d._v_names):
dc = GenericDataIndexableCol(
name=n, pos=i, values=[n], version=self.version)
self._indexables.append(dc)
return self._indexables
def write(self, **kwargs):
raise NotImplementedError("cannot write on an generic table")
class AppendableMultiFrameTable(AppendableFrameTable):
""" a frame with a multi-index """
table_type = u('appendable_multiframe')
obj_type = DataFrame
ndim = 2
_re_levels = re.compile("^level_\d+$")
@property
def table_type_short(self):
return u('appendable_multi')
def write(self, obj, data_columns=None, **kwargs):
if data_columns is None:
data_columns = []
elif data_columns is True:
data_columns = obj.columns.tolist()
obj, self.levels = self.validate_multiindex(obj)
for n in self.levels:
if n not in data_columns:
data_columns.insert(0, n)
return super(AppendableMultiFrameTable, self).write(
obj=obj, data_columns=data_columns, **kwargs)
def read(self, **kwargs):
df = super(AppendableMultiFrameTable, self).read(**kwargs)
df = df.set_index(self.levels)
# remove names for 'level_%d'
df.index = df.index.set_names([
None if self._re_levels.search(l) else l for l in df.index.names
])
return df
class AppendablePanelTable(AppendableTable):
""" suppor the new appendable table formats """
table_type = u('appendable_panel')
ndim = 3
obj_type = Panel
def get_object(self, obj):
""" these are written transposed """
if self.is_transposed:
obj = obj.transpose(*self.data_orientation)
return obj
@property
def is_transposed(self):
return self.data_orientation != tuple(range(self.ndim))
class AppendableNDimTable(AppendablePanelTable):
""" suppor the new appendable table formats """
table_type = u('appendable_ndim')
ndim = 4
obj_type = Panel4D
def _reindex_axis(obj, axis, labels, other=None):
ax = obj._get_axis(axis)
labels = _ensure_index(labels)
# try not to reindex even if other is provided
# if it equals our current index
if other is not None:
other = _ensure_index(other)
if (other is None or labels.equals(other)) and labels.equals(ax):
return obj
labels = _ensure_index(labels.unique())
if other is not None:
labels = _ensure_index(other.unique()) & labels
if not labels.equals(ax):
slicer = [slice(None, None)] * obj.ndim
slicer[axis] = labels
obj = obj.loc[tuple(slicer)]
return obj
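# (added sketch) illustration of _reindex_axis on a plain DataFrame; the helper
# returns the object untouched when the requested labels already equal the axis
# and otherwise slices it down to the matching labels.  Defined for exposition
# only and never called by the library code.
def _demo_reindex_axis():
    df = DataFrame({'A': [1, 2], 'B': [3, 4]}, index=['x', 'y'])
    unchanged = _reindex_axis(df, axis=0, labels=['x', 'y'])  # same object back
    only_b = _reindex_axis(df, axis=1, labels=['B'])  # only column 'B' retained
    return unchanged, only_b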
def _get_info(info, name):
""" get/create the info for this name """
try:
idx = info[name]
except:
idx = info[name] = dict()
return idx
# tz to/from coercion
def _get_tz(tz):
""" for a tz-aware type, return an encoded zone """
zone = tslib.get_timezone(tz)
if zone is None:
zone = tslib.tot_seconds(tz.utcoffset())
return zone
def _set_tz(values, tz, preserve_UTC=False, coerce=False):
"""
coerce the values to a DatetimeIndex if tz is set
preserve the input shape if possible
Parameters
----------
values : ndarray
tz : string/pickled tz object
preserve_UTC : boolean,
preserve the UTC of the result
coerce : if we do not have a passed timezone, coerce to M8[ns] ndarray
"""
if tz is not None:
name = getattr(values, 'name', None)
values = values.ravel()
tz = tslib.get_timezone(_ensure_decoded(tz))
values = DatetimeIndex(values, name=name)
if values.tz is None:
values = values.tz_localize('UTC').tz_convert(tz)
if preserve_UTC:
if tz == 'UTC':
values = list(values)
elif coerce:
values = np.asarray(values, dtype='M8[ns]')
return values
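# (added sketch, not part of the pandas API) how _set_tz is typically used when
# reading datetimes back from HDF5: naive datetime64 values that were stored as
# UTC are localised and then converted to the timezone saved in the metadata.
def _demo_set_tz():
    raw = np.array(['2014-01-01T00:00:00', '2014-01-02T00:00:00'],
                   dtype='M8[ns]')
    return _set_tz(raw, 'US/Eastern')  # tz-aware DatetimeIndex in US/Eastern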
def _convert_index(index, encoding=None, format_type=None):
index_name = getattr(index, 'name', None)
if isinstance(index, DatetimeIndex):
converted = index.asi8
return IndexCol(converted, 'datetime64', _tables().Int64Col(),
freq=getattr(index, 'freq', None),
tz=getattr(index, 'tz', None),
index_name=index_name)
elif isinstance(index, TimedeltaIndex):
converted = index.asi8
return IndexCol(converted, 'timedelta64', _tables().Int64Col(),
freq=getattr(index, 'freq', None),
index_name=index_name)
elif isinstance(index, (Int64Index, PeriodIndex)):
atom = _tables().Int64Col()
        # avoid storing an ndarray of Period objects
return IndexCol(index._values, 'integer', atom,
freq=getattr(index, 'freq', None),
index_name=index_name)
if isinstance(index, MultiIndex):
raise TypeError('MultiIndex not supported here!')
inferred_type = lib.infer_dtype(index)
values = np.asarray(index)
if inferred_type == 'datetime64':
converted = values.view('i8')
return IndexCol(converted, 'datetime64', _tables().Int64Col(),
freq=getattr(index, 'freq', None),
tz=getattr(index, 'tz', None),
index_name=index_name)
elif inferred_type == 'timedelta64':
converted = values.view('i8')
return IndexCol(converted, 'timedelta64', _tables().Int64Col(),
freq=getattr(index, 'freq', None),
index_name=index_name)
elif inferred_type == 'datetime':
converted = np.asarray([(time.mktime(v.timetuple()) +
v.microsecond / 1E6) for v in values],
dtype=np.float64)
return IndexCol(converted, 'datetime', _tables().Time64Col(),
index_name=index_name)
elif inferred_type == 'date':
converted = np.asarray([v.toordinal() for v in values],
dtype=np.int32)
return IndexCol(converted, 'date', _tables().Time32Col(),
index_name=index_name)
elif inferred_type == 'string':
# atom = _tables().ObjectAtom()
# return np.asarray(values, dtype='O'), 'object', atom
converted = _convert_string_array(values, encoding)
itemsize = converted.dtype.itemsize
return IndexCol(
converted, 'string', _tables().StringCol(itemsize),
itemsize=itemsize, index_name=index_name
)
elif inferred_type == 'unicode':
if format_type == 'fixed':
atom = _tables().ObjectAtom()
return IndexCol(np.asarray(values, dtype='O'), 'object', atom,
index_name=index_name)
raise TypeError(
"[unicode] is not supported as a in index type for [{0}] formats"
.format(format_type)
)
elif inferred_type == 'integer':
# take a guess for now, hope the values fit
atom = _tables().Int64Col()
return IndexCol(np.asarray(values, dtype=np.int64), 'integer', atom,
index_name=index_name)
elif inferred_type == 'floating':
atom = _tables().Float64Col()
return IndexCol(np.asarray(values, dtype=np.float64), 'float', atom,
index_name=index_name)
else: # pragma: no cover
atom = _tables().ObjectAtom()
return IndexCol(np.asarray(values, dtype='O'), 'object', atom,
index_name=index_name)
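# (added note) a rough map of what _convert_index produces for the common index
# types; the examples are illustrative only:
#   DatetimeIndex            -> kind 'datetime64', stored in an Int64Col (asi8)
#   Index(['a', 'bb'])       -> kind 'string', stored in a StringCol(itemsize=2)
#   Float64Index([1.5, 2.5]) -> kind 'float', stored in a Float64Col
#   MultiIndex               -> TypeError (not supported by this code path)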
def _unconvert_index(data, kind, encoding=None):
kind = _ensure_decoded(kind)
if kind == u('datetime64'):
index = DatetimeIndex(data)
elif kind == u('timedelta64'):
index = TimedeltaIndex(data)
elif kind == u('datetime'):
index = np.asarray([datetime.fromtimestamp(v) for v in data],
dtype=object)
elif kind == u('date'):
try:
index = np.asarray(
[date.fromordinal(v) for v in data], dtype=object)
except (ValueError):
index = np.asarray(
[date.fromtimestamp(v) for v in data], dtype=object)
elif kind in (u('integer'), u('float')):
index = np.asarray(data)
elif kind in (u('string')):
index = _unconvert_string_array(data, nan_rep=None, encoding=encoding)
elif kind == u('object'):
index = np.asarray(data[0])
else: # pragma: no cover
raise ValueError('unrecognized index type %s' % kind)
return index
def _unconvert_index_legacy(data, kind, legacy=False, encoding=None):
kind = _ensure_decoded(kind)
if kind == u('datetime'):
index = lib.time64_to_datetime(data)
elif kind in (u('integer')):
index = np.asarray(data, dtype=object)
elif kind in (u('string')):
index = _unconvert_string_array(data, nan_rep=None, encoding=encoding)
else: # pragma: no cover
raise ValueError('unrecognized index type %s' % kind)
return index
def _convert_string_array(data, encoding, itemsize=None):
"""
we take a string-like that is object dtype and coerce to a fixed size
string type
Parameters
----------
data : a numpy array of object dtype
encoding : None or string-encoding
itemsize : integer, optional, defaults to the max length of the strings
Returns
-------
data in a fixed-length string dtype, encoded to bytes if needed
"""
# encode if needed
if encoding is not None and len(data):
data = Series(data.ravel()).str.encode(
encoding).values.reshape(data.shape)
# create the sized dtype
if itemsize is None:
itemsize = lib.max_len_string_array(_ensure_object(data.ravel()))
data = np.asarray(data, dtype="S%d" % itemsize)
return data
def _unconvert_string_array(data, nan_rep=None, encoding=None):
"""
inverse of _convert_string_array
Parameters
----------
data : fixed length string dtyped array
nan_rep : the storage repr of NaN, optional
encoding : the encoding of the data, optional
Returns
-------
an object array of the decoded data
"""
shape = data.shape
data = np.asarray(data.ravel(), dtype=object)
# guard against a None encoding in PY3 (because of a legacy
# where the passed encoding is actually None)
encoding = _ensure_encoding(encoding)
if encoding is not None and len(data):
itemsize = lib.max_len_string_array(_ensure_object(data))
if compat.PY3:
dtype = "U{0}".format(itemsize)
else:
dtype = "S{0}".format(itemsize)
if isinstance(data[0], compat.binary_type):
data = Series(data).str.decode(encoding).values
else:
data = data.astype(dtype, copy=False).astype(object, copy=False)
if nan_rep is None:
nan_rep = 'nan'
data = lib.string_array_replace_from_nan_rep(data, nan_rep)
return data.reshape(shape)
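# (added sketch) round trip through the two helpers above: object-dtype strings
# are packed into fixed-width bytes for HDF5 and decoded back on read.
def _demo_string_round_trip():
    arr = np.array([u'a', u'bb', u'ccc'], dtype=object)
    packed = _convert_string_array(arr, encoding='utf-8')  # dtype 'S3'
    return _unconvert_string_array(packed, encoding='utf-8')  # object array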
def _maybe_convert(values, val_kind, encoding):
if _need_convert(val_kind):
conv = _get_converter(val_kind, encoding)
# conv = np.frompyfunc(conv, 1, 1)
values = conv(values)
return values
def _get_converter(kind, encoding):
kind = _ensure_decoded(kind)
if kind == 'datetime64':
return lambda x: np.asarray(x, dtype='M8[ns]')
elif kind == 'datetime':
return lib.convert_timestamps
elif kind == 'string':
return lambda x: _unconvert_string_array(x, encoding=encoding)
else: # pragma: no cover
raise ValueError('invalid kind %s' % kind)
def _need_convert(kind):
kind = _ensure_decoded(kind)
if kind in (u('datetime'), u('datetime64'), u('string')):
return True
return False
class Selection(object):
"""
Carries out a selection operation on a tables.Table object.
Parameters
----------
table : a Table object
    where : list of Terms (or convertible to)
    start, stop : indices to start and/or stop selection
"""
def __init__(self, table, where=None, start=None, stop=None, **kwargs):
self.table = table
self.where = where
self.start = start
self.stop = stop
self.condition = None
self.filter = None
self.terms = None
self.coordinates = None
if is_list_like(where):
# see if we have a passed coordinate like
try:
inferred = lib.infer_dtype(where)
if inferred == 'integer' or inferred == 'boolean':
where = np.asarray(where)
if where.dtype == np.bool_:
start, stop = self.start, self.stop
if start is None:
start = 0
if stop is None:
stop = self.table.nrows
self.coordinates = np.arange(start, stop)[where]
elif issubclass(where.dtype.type, np.integer):
if ((self.start is not None and
(where < self.start).any()) or
(self.stop is not None and
(where >= self.stop).any())):
raise ValueError(
"where must have index locations >= start and "
"< stop"
)
self.coordinates = where
except:
pass
if self.coordinates is None:
self.terms = self.generate(where)
# create the numexpr & the filter
if self.terms is not None:
self.condition, self.filter = self.terms.evaluate()
def generate(self, where):
""" where can be a : dict,list,tuple,string """
if where is None:
return None
q = self.table.queryables()
try:
return Expr(where, queryables=q, encoding=self.table.encoding)
except NameError:
# raise a nice message, suggesting that the user should use
# data_columns
raise ValueError(
"The passed where expression: {0}\n"
" contains an invalid variable reference\n"
" all of the variable refrences must be a "
"reference to\n"
" an axis (e.g. 'index' or 'columns'), or a "
"data_column\n"
" The currently defined references are: {1}\n"
.format(where, ','.join(q.keys()))
)
def select(self):
"""
generate the selection
"""
if self.condition is not None:
return self.table.table.read_where(self.condition.format(),
start=self.start,
stop=self.stop)
elif self.coordinates is not None:
return self.table.table.read_coordinates(self.coordinates)
return self.table.table.read(start=self.start, stop=self.stop)
def select_coords(self):
"""
generate the selection
"""
start, stop = self.start, self.stop
nrows = self.table.nrows
if start is None:
start = 0
elif start < 0:
start += nrows
if self.stop is None:
stop = nrows
elif stop < 0:
stop += nrows
if self.condition is not None:
return self.table.table.get_where_list(self.condition.format(),
start=start, stop=stop,
sort=True)
elif self.coordinates is not None:
return self.coordinates
return np.arange(start, stop)
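# (added sketch) typical use of Selection from inside this module; 'tbl' stands
# for one of the Table wrappers defined above (e.g. an AppendableFrameTable)
# and the where string / column name are illustrative only.
def _demo_selection(tbl):
    sel = Selection(tbl, where='index > 5', start=0, stop=1000)
    return sel.select_coords()  # row coordinates in [start, stop) that match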
# utilities ###
def timeit(key, df, fn=None, remove=True, **kwargs):
if fn is None:
fn = 'timeit.h5'
store = HDFStore(fn, mode='w')
store.append(key, df, **kwargs)
store.close()
if remove:
os.remove(fn)
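# (added note) example use of the helper above from an interactive session; the
# frame is illustrative:
#   timeit('df', DataFrame(np.random.randn(100000, 4)), data_columns=True)
# appends the frame to a scratch 'timeit.h5' in table format and then removes
# the file, so it can be wrapped in an external timer to measure write speed.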
| bsd-3-clause |
PiscesDream/Homework2015-2016 | numeric/week2/NewtonInterpolation.py | 1 | 2902 | '''
A python script for Newton Interpolation Method
'''
import numpy as np
def calcDifferenceQuotient(x, fx):
assert len(x) == len(fx)
N = len(x)
dq = np.zeros((N, N))
dq[0] = fx
for l in range(1, N):
for i in range(N):
if i+l >= N: continue
j = i+l
dq[l][i] = (dq[l-1][i]-dq[l-1][i+1])/(x[i]-x[j])
print 'dq[%s] = (%.3f-%.3f)/(%.3f-%.3f) = %.3f' % (', '.join(map(lambda x: 'x_%d' % x, range(i, i+l+1))), dq[l-1][i], dq[l-1][i+1], x[i], x[j], dq[l][i])
return dq
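# (added note) the recurrence implemented above is the standard divided
# difference
#     f[x_i, ..., x_{i+l}] = (f[x_i, ..., x_{i+l-1}] - f[x_{i+1}, ..., x_{i+l}])
#                            / (x_i - x_{i+l})
# so dq[l][i] holds the order-l divided difference starting at x_i, and the
# first column dq[:, 0] gives the coefficients of the Newton form used below.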
def getNewtonInterpolation(x, dq, plot=False):
N = dq.shape[0]
print 'p(x) = %.3f' % dq[0][0]
for i in range(0, N-1):
        print '\t+', ''.join(map(lambda y: '(x-%.3f)' % y, x[:i+1])), '*', '%.3f'%dq[i+1][0]
def p(_x):
result = dq[0][0]
if plot: print 'f(%.3f) = %.3f' % (_x, dq[0][0])
for i in range(0, N-1):
            if plot: print '\t+', ''.join(map(lambda y: '(%.3f-%.3f)' % (_x, y), x[:i+1])), '*', '%.3f'%dq[i+1][0]
result += (_x-x[:i+1]).prod()*(dq[i+1][0])
if plot: print '\t= %.6f' % result
return result
return p
class NewtonInterpolation(object):
def __init__(self, x = [], fx = []):
self.setData(x, fx)
def setData(self, x = [], fx = []):
self.x = np.array(x)
self.fx = np.array(fx)
assert len(x) == len(fx)
self.N = len(x)
# Calculate Difference Quotient
self.dq = calcDifferenceQuotient(self.x, self.fx)
# Calculate the interpolation equation
self.p = getNewtonInterpolation(self.x, self.dq)
def predict(self, x):
return map(self.p, x)
def drawAndSave(self, x0=None, xn=None, filename='NewtonInterpolationPlot.jpg', points=100, f=None):
if not x0: x0 = self.x[0]
if not xn: xn = self.x[-1]
x = np.linspace(x0, xn, points)
y = self.predict(x)
        import matplotlib
        matplotlib.use('Agg')
        import matplotlib.pyplot as plt
        # curve of the fitted Newton polynomial
        p_plot, = plt.plot(x, y, '-', lw=2, label='Newton Interpolation')
        original_plot, = plt.plot(self.x, self.fx, '.', color='red', ms=13, label='Original points')
        legend = [p_plot, original_plot]
if f:
f_plot, = plt.plot(x, map(f, x), '-', lw=2, color='green', label='Original Function')
legend.append(f_plot)
plt.legend(handles=legend, loc='upper left')
plt.savefig(filename)
print 'Plot is saved in [%s]' % filename
if __name__ == '__main__':
# data 1: sin
x = [np.pi/6, np.pi/4, np.pi/3]
fx = [1.0/2.0, 1.0/np.sqrt(2.0), np.sqrt(3.0)/2.0]
# data 2: random
x = np.random.uniform(0, 1000, size=(10,))
fx = np.random.uniform(0, 1000, size=(10,))
x.sort()
print '(x, y): ', zip(x, fx)
ni = NewtonInterpolation(x, fx)
print map(ni.p, x)
# ni.drawAndSave(-np.pi/2, np.pi)
# ni.drawAndSave(-np.pi/2, np.pi, f=np.sin)
ni.drawAndSave()
| mit |
cauchycui/scikit-learn | examples/linear_model/plot_logistic.py | 312 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Shown in the plot is how the logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logit-curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
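# (added note) model() is the logistic sigmoid sigma(t) = 1 / (1 + exp(-t));
# the curve plotted below is sigma(clf.coef_ * X_test + clf.intercept_), i.e.
# the fitted P(y=1 | x), which is why it saturates at 0 and 1.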
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
| bsd-3-clause |
stuartsale/marg_iso | marg_iso/posterior.py | 1 | 36565 | # Import stuff
from __future__ import print_function, division
import emcee
import numpy as np
from scipy import linalg
import sklearn.cluster as sk_c
import sklearn.mixture as sk_m
# Attempt to import matplotlib
try:
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl_present = True
except ImportError:
mpl_present = False
from cluster import posterior_cluster
import isolib as il
from probs import photom_prob
class star_posterior:
""" This is a class to claculate and contain the marginal
posterior distribution on a star's distance modulus,
ln extinction and extinction law, i.e. p(s,lnA,R|D).
The class employs a special, vectorised version of emcee
to sample from the posterior. Marginalisation over Teff,
logg and metallicity is performed by simply ignoring these
values in the chain.
"""
# init function
def __init__(self, gal_l, gal_b, mag_in, d_mag_in, isochrones=None,
isochrone_file=None, init_bands=None):
""" __init__(l, b, mag_in, d_mag_in, isochrones=None,
isochrone_file=None, init_bands=None)
Initialise a star_posterior object, giving it the
on-sky position of the star and some photometry
(with uncertainties).
Parameters
----------
gal_l : float
The Galactic Longitude of the star.
gal_b : float
                The Galactic latitude of the star
mag_in : dict
The photometry of the star, where the band label is
the key and the magnitude is the value.
d_mag_in : dict
The photometric uncertainties, where the band label
is the key and the uncertainty is the value.
isochrones : isolib.iso_grid_tefflogg, optional
A set of isochrones in ([Fe/H], Teff, logg) space.
isochrones_file : string, optional
The location of a file that contains isochrones
that can be read in by isolib for use.
init_bands : list, optional
A (sub-)set of the observed photometric bands
which are used to make an initial guess at the
parameters of the star. Cannot contain bands that
are not in mag_in.
Notes
-----
            For a list of valid photometric bands see <INSERT
ADDRESS>.
Either isochrones or isochrone_file must be provided.
            Otherwise the class will have no isochrone information.
"""
self.colors = np.array([x for x in 'bgrcmybgrcmybgrcmybgrcmy'])
self.colors = np.hstack([self.colors] * 20)
self.gal_l = np.radians(gal_l)
self.gal_b = np.radians(gal_b)
self.sinl = np.sin(self.gal_l)
self.cosl = np.cos(self.gal_l)
self.sinb = np.sin(self.gal_b)
self.cosb = np.cos(self.gal_b)
self.mag = mag_in
self.d_mag = d_mag_in
if init_bands is None:
self.init_bands = self.mag.keys()[:2]
else:
self.init_bands = init_bands
if isochrones is None:
if isochrone_file is not None:
isochrones = il.iso_grid_tefflogg(isochrone_file,
bands=self.mag.keys())
else:
raise IOError("Either an isochrone must be provided "
"or a filename for one given")
self.isochrones = isochrones
self.MCMC_run = False
self.best_gmm = None
# ==============================================================
# Functions to work with emcee sampler
def emcee_init(self, N_walkers, chain_length):
""" emcee_init(N_walkers, chain_length)
Initialises the emcee walkers.
Parameters
----------
N_walkers : int
The number of walkers to be used.
chain_length: int
The length of the MCMC chain
"""
self.start_params = np.zeros([N_walkers, 6])
guess_set = []
guess_set.append([0., 3.663, 4.57, 0., 0., 3.1]) # K4V
guess_set.append([0., 3.672, 4.56, 0., 0., 3.1]) # K3V
guess_set.append([0., 3.686, 4.55, 0., 0., 3.1]) # K2V
guess_set.append([0., 3.695, 4.55, 0., 0., 3.1]) # K1V
guess_set.append([0., 3.703, 4.57, 0., 0., 3.1]) # K0V
guess_set.append([0., 3.720, 4.55, 0., 0., 3.1]) # G8V
guess_set.append([0., 3.740, 4.49, 0., 0., 3.1]) # G5V
guess_set.append([0., 3.763, 4.40, 0., 0., 3.1]) # G2V
guess_set.append([0., 3.774, 4.39, 0., 0., 3.1]) # G0V
guess_set.append([0., 3.789, 4.35, 0., 0., 3.1]) # F8V
guess_set.append([0., 3.813, 4.34, 0., 0., 3.1]) # F5V
guess_set.append([0., 3.845, 4.26, 0., 0., 3.1]) # F2V
guess_set.append([0., 3.863, 4.28, 0., 0., 3.1]) # F0V
guess_set.append([0., 3.903, 4.26, 0., 0., 3.1]) # A7V
guess_set.append([0., 3.924, 4.22, 0., 0., 3.1]) # A5V
guess_set.append([0., 3.949, 4.20, 0., 0., 3.1]) # A3V
guess_set.append([0., 3.961, 4.16, 0., 0., 3.1]) # A2V
# guess_set.append([0.,3.763 ,3.20 ,0.,0.,3.1]) #G2III
# guess_set.append([0.,3.700 ,2.75 ,0.,0.,3.1]) #G8III
# guess_set.append([0.,3.663 ,2.52 ,0.,0.,3.1]) #K1III
# guess_set.append([0.,3.602 ,1.25 ,0.,0.,3.1]) #K5III
# guess_set.append([0.,3.591 ,1.10 ,0.,0.,3.1]) #M0III
guess_set.append([0., 3.760, 4.00, 0., 0., 3.1]) # horizontal branch
guess_set.append([0., 3.720, 3.80, 0., 0., 3.1]) #
guess_set.append([0., 3.680, 3.00, 0., 0., 3.1]) #
guess_set.append([0., 3.700, 2.75, 0., 0., 3.1]) # G8III
guess_set.append([0., 3.680, 2.45, 0., 0., 3.1]) #
guess_set.append([0., 3.600, 1.20, 0., 0., 3.1]) # K5III
guess_set.append([0., 3.580, 0.30, 0., 0., 3.1]) # K5III
guess_set = np.array(guess_set)
iso_objs = self.isochrones.query(guess_set[:, 0], guess_set[:, 1],
guess_set[:, 2])
guess_set[:, 4] = (
np.log(np.maximum(0.007,
((self.mag[self.init_bands[0]]
- self.mag[self.init_bands[1]])
- (iso_objs.abs_mag[self.init_bands[0]]
- iso_objs.abs_mag[self.init_bands[1]])
)
/ (iso_objs.AX1[self.init_bands[0]]
[np.arange(guess_set.shape[0]), 11]
- iso_objs.AX1[self.init_bands[1]]
[np.arange(guess_set.shape[0]), 11]))))
guess_set[:, 3] = (self.mag[self.init_bands[0]]
- (iso_objs.AX1[self.init_bands[0]]
[np.arange(guess_set.shape[0]), 11]
* guess_set[:, 4]
+ iso_objs.AX2[self.init_bands[0]]
[np.arange(guess_set.shape[0]), 11]
* guess_set[:, 4] * guess_set[:, 4]
+ iso_objs.abs_mag[self.init_bands[0]]))
metal_min = sorted(self.isochrones.metal_dict.keys())[0]
metal_max = sorted(self.isochrones.metal_dict.keys())[-1]
for it in range(N_walkers):
self.start_params[it, :] = (
guess_set[int(np.random.uniform()*len(guess_set))])
self.start_params[it, 0] = (
metal_min+(metal_max-metal_min)*np.random.uniform())
self.start_params[it, 5] = 2.9+0.4*np.random.uniform()
self.Teff_chain = np.zeros(chain_length)
self.logg_chain = np.zeros(chain_length)
self.feh_chain = np.zeros(chain_length)
self.dist_mod_chain = np.zeros(chain_length)
self.logA_chain = np.zeros(chain_length)
self.RV_chain = np.zeros(chain_length)
self.prob_chain = np.zeros(chain_length)
self.prior_chain = np.zeros(chain_length)
self.Jac_chain = np.zeros(chain_length)
self.accept_chain = np.zeros(chain_length)
self.photom_chain = {}
for band in self.mag:
self.photom_chain[band] = np.zeros(chain_length)
self.itnum_chain = np.zeros(chain_length)
def emcee_run(self, iterations=10000, thin=10, burn_in=2000,
N_walkers=50, prune=True, prune_plot=False,
verbose_chain=True):
""" emcee_run(iterations=10000, thin=10, burn_in=2000,
N_walkers=50, prune=True, prune_plot=False,
verbose_chain=True)
Runs the emcee based inference of the posterior.
Parameters
----------
iterations : int, optional
The number of iterations of emcee that will be
performed.
thin : int, optional
A thinning factor that results in only 1 in
every *thin* iterations being stored to the chain.
            burn_in : int, optional
                Sets the length of the burn-in, during which
                nothing is retained in the chain.
            N_walkers : int, optional
The number of walkers to be used.
prune : bool, optional
Determines whether a pruning of obviously
'lost' walkers is performed at the end of burn-in.
These walkers are then dropped back onto
randomly chosen 'good' walkers.
prune_plot : bool, optional
Produce a diagnostic plot showing what has
happened during the pruning.
verbose_chain : bool, optional
Provides the option to store the state of a
greater variety of parameters in the chain.
"""
self.verbose_chain = verbose_chain
self.emcee_init(N_walkers,
int((iterations-burn_in)/thin*N_walkers))
sampler = emcee.EnsembleSampler(N_walkers, 6, photom_prob, args=[self],
a=1.5)
# Burn-in
pos, last_prob, state = sampler.run_mcmc(self.start_params, burn_in)
sampler.reset()
if prune:
dbscan = sk_c.DBSCAN(eps=0.05)
# pruning set
pos, last_prob, state = sampler.run_mcmc(pos, 100, rstate0=state,
lnprob0=last_prob)
dbscan.fit(sampler.flatchain[:, 1:2])
labels = dbscan.labels_.astype(np.int)
if prune_plot and mpl_present:
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax1.scatter(sampler.flatchain[:, 1], sampler.flatchain[:, 2],
color='0.5', s=1)
ax1.scatter(pos[:, 1], pos[:, 2],
color=self.colors[labels].tolist(), s=3)
ax1.set_xlim(right=3.5, left=4.5)
ax1.set_ylim(bottom=5., top=2.)
ax1 = fig.add_subplot(223)
ax1.scatter(sampler.flatchain[:, 1], sampler.flatlnprobability,
color='0.5', s=1)
ax1.scatter(pos[:, 1], last_prob,
color=self.colors[labels].tolist(), s=3)
ax1.set_xlim(right=3.5, left=4.5)
median_ln_prob = np.median(sampler.flatlnprobability)
cl_list = []
weights_list = []
weights_sum = 0
for cl_it in range(np.max(labels)+1):
cl_list.append(posterior_cluster(
sampler.flatchain[labels == cl_it, :],
sampler.flatlnprobability[labels == cl_it]
- median_ln_prob))
weights_sum += cl_list[-1].weight
weights_list.append(cl_list[-1].weight)
for i in range(N_walkers):
cluster = np.random.choice(np.max(labels)+1,
p=weights_list/np.sum(weights_list))
index = int(np.random.uniform()
* len(cl_list[cluster]))
pos[i, :] = cl_list[cluster].data[index, :]
if prune_plot and mpl_present:
ax1 = fig.add_subplot(222)
ax1.scatter(sampler.flatchain[:, 1],
sampler.flatchain[:, 2], color='0.5', s=1)
ax1.scatter(pos[:, 1], pos[:, 2],
color=self.colors[labels].tolist(), s=3)
ax1.set_xlim(right=3.5, left=4.5)
ax1.set_ylim(bottom=5., top=2.)
ax1 = fig.add_subplot(224)
ax1.scatter(sampler.flatchain[:, 1],
sampler.flatlnprobability, color='0.5',
s=1)
ax1.scatter(pos[:, 1], last_prob,
color=self.colors[labels].tolist(), s=3)
ax1.set_xlim(right=3.5, left=4.5)
plt.tight_layout(pad=0.2, w_pad=0.1, h_pad=0.6)
plt.savefig("prune.pdf")
sampler.reset()
if self.verbose_chain:
# proper run
for i, (pos, prob, rstate) in enumerate(
sampler.sample(pos, iterations=(iterations-burn_in),
storechain=False)):
if i % thin == 0:
start = int(i/thin*N_walkers)
end = int((i/thin+1)*N_walkers)
self.feh_chain[start:end] = pos[:, 0]
self.Teff_chain[start:end] = pos[:, 1]
self.logg_chain[start:end] = pos[:, 2]
self.dist_mod_chain[start:end] = pos[:, 3]
self.logA_chain[start:end] = pos[:, 4]
self.RV_chain[start:end] = pos[:, 5]
self.prob_chain[start:end] = prob
self.itnum_chain[start:end] = i
self.accept_chain[start:end] = sampler.acceptance_fraction
iso_obj = self.isochrones.query(pos[:, 0],
pos[:, 1],
pos[:, 2])
# A=np.exp(pos[:,4])
for band in self.mag:
self.photom_chain[band][start:end] = (
iso_obj.abs_mag[band])
else:
pos, last_prob, state = sampler.run_mcmc(pos, iterations-burn_in,
thin=thin)
self.feh_chain = sampler.flatchain[:, 0]
self.Teff_chain = sampler.flatchain[:, 1]
self.logg_chain = sampler.flatchain[:, 2]
self.dist_mod_chain = sampler.flatchain[:, 3]
self.logA_chain = sampler.flatchain[:, 4]
self.RV_chain = sampler.flatchain[:, 5]
self.prob_chain = sampler.flatlnprobability
self.MCMC_run = True
# ==============================================================
# Functions to work with emcee sampler
# EnsembleSampler version
def emcee_ES_init(self, N_temps, N_walkers, chain_length):
""" emcee_ES_init(N_walkers, chain_length)
Initialises the emcee walkers for the ensemble
sampler.
Parameters
----------
            N_temps : int
                The number of temperatures to be used by the
                parallel-tempered sampler.
            N_walkers : int
                The number of walkers to be used.
            chain_length : int
                The length of the MCMC chain
"""
self.start_params = np.zeros([N_temps, N_walkers, 6])
guess_set = []
guess_set.append([0., 3.663, 4.57, 0., 0., 3.1]) # K4V
guess_set.append([0., 3.672, 4.56, 0., 0., 3.1]) # K3V
guess_set.append([0., 3.686, 4.55, 0., 0., 3.1]) # K2V
guess_set.append([0., 3.695, 4.55, 0., 0., 3.1]) # K1V
guess_set.append([0., 3.703, 4.57, 0., 0., 3.1]) # K0V
guess_set.append([0., 3.720, 4.55, 0., 0., 3.1]) # G8V
guess_set.append([0., 3.740, 4.49, 0., 0., 3.1]) # G5V
guess_set.append([0., 3.763, 4.40, 0., 0., 3.1]) # G2V
guess_set.append([0., 3.774, 4.39, 0., 0., 3.1]) # G0V
guess_set.append([0., 3.789, 4.35, 0., 0., 3.1]) # F8V
guess_set.append([0., 3.813, 4.34, 0., 0., 3.1]) # F5V
guess_set.append([0., 3.845, 4.26, 0., 0., 3.1]) # F2V
guess_set.append([0., 3.863, 4.28, 0., 0., 3.1]) # F0V
guess_set.append([0., 3.903, 4.26, 0., 0., 3.1]) # A7V
guess_set.append([0., 3.924, 4.22, 0., 0., 3.1]) # A5V
guess_set.append([0., 3.949, 4.20, 0., 0., 3.1]) # A3V
guess_set.append([0., 3.961, 4.16, 0., 0., 3.1]) # A2V
# guess_set.append([0.,3.763 ,3.20 ,0.,0.,3.1]) #G2III
# guess_set.append([0.,3.700 ,2.75 ,0.,0.,3.1]) #G8III
# guess_set.append([0.,3.663 ,2.52 ,0.,0.,3.1]) #K1III
# guess_set.append([0.,3.602 ,1.25 ,0.,0.,3.1]) #K5III
# guess_set.append([0.,3.591 ,1.10 ,0.,0.,3.1]) #M0III
guess_set.append([0., 3.760, 4.00, 0., 0., 3.1]) # horizontal branch
guess_set.append([0., 3.720, 3.80, 0., 0., 3.1]) #
guess_set.append([0., 3.680, 3.00, 0., 0., 3.1]) #
guess_set.append([0., 3.700, 2.75, 0., 0., 3.1]) # G8III
guess_set.append([0., 3.680, 2.45, 0., 0., 3.1]) #
guess_set.append([0., 3.600, 1.20, 0., 0., 3.1]) # K5III
guess_set.append([0., 3.580, 0.30, 0., 0., 3.1]) # K5III
guess_set = np.array(guess_set)
iso_objs = self.isochrones.query(guess_set[:, 0], guess_set[:, 1],
guess_set[:, 2])
guess_set[:, 4] = (
np.log(((self.mag[self.init_bands[0]]
- self.mag[self.init_bands[1]])
- (iso_objs.abs_mag[self.init_bands[0]]
- iso_objs.abs_mag[self.init_bands[1]])
) / (iso_objs.AX1[self.init_bands[0]]
[np.arange(guess_set.shape[0]), 11]
- iso_objs.AX1[self.init_bands[1]]
[np.arange(guess_set.shape[0]), 11])))
guess_set[:, 3] = (self.mag[self.init_bands[0]]
- (iso_objs.AX1[self.init_bands[0]]
[np.arange(guess_set.shape[0]), 11]
* guess_set[:, 4]
+ iso_objs.AX2[self.init_bands[0]]
[np.arange(guess_set.shape[0]), 11]
* guess_set[:, 4] * guess_set[:, 4]
+ iso_objs.abs_mag[self.init_bands[0]]))
metal_min = sorted(self.isochrones.metal_dict.keys())[0]
metal_max = sorted(self.isochrones.metal_dict.keys())[-1]
for it1 in range(N_temps):
for it2 in range(N_walkers):
self.start_params[it1, it2, :] = (
guess_set[int(np.random.uniform()*len(guess_set))])
self.start_params[it1, it2, 0] = (
metal_min + (metal_max-metal_min) * np.random.uniform())
self.start_params[it1, it2, 5] = (
2.9 + 0.4*np.random.uniform())
self.Teff_chain = np.zeros(chain_length)
self.logg_chain = np.zeros(chain_length)
self.feh_chain = np.zeros(chain_length)
self.dist_mod_chain = np.zeros(chain_length)
self.logA_chain = np.zeros(chain_length)
self.RV_chain = np.zeros(chain_length)
self.prob_chain = np.zeros(chain_length)
self.prior_chain = np.zeros(chain_length)
self.Jac_chain = np.zeros(chain_length)
self.accept_chain = np.zeros(chain_length)
self.photom_chain = {}
for band in self.mag:
self.photom_chain[band] = np.zeros(chain_length)
self.itnum_chain = np.zeros(chain_length)
def emcee_ES_run(self, iterations=10000, thin=10, burn_in=2000,
N_temps=4, N_walkers=12, prune=True,
prune_plot=False, verbose_chain=True):
""" emcee_run(iterations=10000, thin=10, burn_in=2000,
N_walkers=50, prune=True, prune_plot=False,
verbose_chain=True)
Runs the emcee based inference of the posterior using
the ensemble sampler.
Parameters
----------
iterations : int, optional
The number of iterations of emcee that will be
performed.
thin : int, optional
A thinning factor that results in only 1 in
every *thin* iterations being stored to the chain.
            burn_in : int, optional
                Sets the length of the burn-in, during which
                nothing is retained in the chain.
            N_temps : int, optional
                The number of temperatures to be used by the
                parallel-tempered sampler.
            N_walkers : int, optional
                The number of walkers to be used.
prune : bool, optional
Determines whether a pruning of obviously
'lost' walkers is performed at the end of burn-in.
These walkers are then dropped back onto
randomly chosen 'good' walkers.
prune_plot : bool, optional
Produce a diagnostic plot showing what has
happened during the pruning.
verbose_chain : bool, optional
Provides the option to store the state of a
greater variety of parameters in the chain.
"""
self.verbose_chain = verbose_chain
self.emcee_ES_init(N_temps, N_walkers,
int((iterations-burn_in)/thin*N_walkers))
sampler = emcee.PTSampler(N_temps, N_walkers, 6, photom_prob,
lambda(x): np.zeros(x.shape[1]),
loglargs=[self], a=1.5)
# Burn-in
pos, last_prob, state = sampler.run_mcmc(self.start_params,
burn_in)
sampler.reset()
if prune:
dbscan = sk_c.DBSCAN(eps=0.05)
# pruning set
pos, last_prob, state = sampler.run_mcmc(pos, 100,
rstate0=state,
lnprob0=last_prob)
dbscan.fit(sampler.flatchain[:, 1:2])
labels = dbscan.labels_.astype(np.int)
if prune_plot and mpl_present:
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax1.scatter(sampler.flatchain[:, 1],
sampler.flatchain[:, 2], color='0.5', s=1)
ax1.scatter(pos[:, 1], pos[:, 2],
color=self.colors[labels].tolist(), s=3)
ax1.set_xlim(right=3.5, left=4.5)
ax1.set_ylim(bottom=5., top=2.)
ax1 = fig.add_subplot(223)
ax1.scatter(sampler.flatchain[:, 1],
sampler.flatlnprobability, color='0.5',
s=1)
ax1.scatter(pos[:, 1], last_prob,
color=self.colors[labels].tolist(), s=3)
ax1.set_xlim(right=3.5, left=4.5)
median_ln_prob = np.median(sampler.flatlnprobability)
cl_list = []
weights_list = []
weights_sum = 0
for cl_it in range(np.max(labels)+1):
cl_list.append(posterior_cluster(
sampler.flatchain[labels == cl_it, :],
sampler.flatlnprobability[labels == cl_it]
- median_ln_prob))
weights_sum += cl_list[-1].weight
weights_list.append(cl_list[-1].weight)
for i in range(N_walkers):
cluster = np.random.choice(np.max(labels)+1,
p=weights_list/np.sum(weights_list))
index = int(np.random.uniform()*len(cl_list[cluster]))
pos[i, :] = cl_list[cluster].data[index, :]
if prune_plot and mpl_present:
ax1 = fig.add_subplot(222)
ax1.scatter(sampler.flatchain[:, 1],
sampler.flatchain[:, 2], color='0.5', s=1)
ax1.scatter(pos[:, 1], pos[:, 2],
color=self.colors[labels].tolist(), s=3)
ax1.set_xlim(right=3.5, left=4.5)
ax1.set_ylim(bottom=5., top=2.)
ax1 = fig.add_subplot(224)
ax1.scatter(sampler.flatchain[:, 1],
sampler.flatlnprobability, color='0.5',
s=1)
ax1.scatter(pos[:, 1], last_prob,
color=self.colors[labels].tolist(), s=3)
ax1.set_xlim(right=3.5, left=4.5)
plt.tight_layout(pad=0.2, w_pad=0.1, h_pad=0.6)
plt.savefig("prune.pdf")
sampler.reset()
if self.verbose_chain:
for i, (pos, prob, rstate) in enumerate(
sampler.sample(pos, iterations=(iterations-burn_in),
storechain=False)): # proper run
if i % thin == 0:
start = int(i/thin*N_walkers)
end = int((i/thin+1)*N_walkers)
self.feh_chain[start:end] = pos[0, :, 0]
self.Teff_chain[start:end] = pos[0, :, 1]
self.logg_chain[start:end] = pos[0, :, 2]
self.dist_mod_chain[start:end] = pos[0, :, 3]
self.logA_chain[start:end] = pos[0, :, 4]
self.RV_chain[start:end] = pos[0, :, 5]
self.prob_chain[start:end] = prob[0, :]
self.itnum_chain[start:end] = i
self.accept_chain[start:end] = (
sampler.acceptance_fraction[0, :])
iso_obj = self.isochrones.query(pos[0, :, 0],
pos[0, :, 1],
pos[0, :, 2])
# A=np.exp(pos[:,4])
for band in self.mag:
self.photom_chain[band][start:end] = (
iso_obj.abs_mag[band])
else:
pos, last_prob, state = sampler.run_mcmc(pos, iterations-burn_in,
thin=thin)
self.feh_chain = sampler.flatchain[:, 0]
self.Teff_chain = sampler.flatchain[:, 1]
self.logg_chain = sampler.flatchain[:, 2]
self.dist_mod_chain = sampler.flatchain[:, 3]
self.logA_chain = sampler.flatchain[:, 4]
self.RV_chain = sampler.flatchain[:, 5]
self.prob_chain = sampler.flatlnprobability
self.MCMC_run = True
# ==============================================================
# Fit Gaussians
def gmm_fit(self, max_components=10, verbose=False):
""" gmm_fit(max_components=10)
Fit a Gaussian mixture model to the (marginalised)
            MCMC chain in (distance modulus, ln extinction,
extinction law) space.
            Parameters
            ----------
            max_components : int, optional
                The maximum size of the GMM (in terms of
                number of components) that will be fit.
            verbose : bool, optional
                Controls the verbosity of the function.
            Notes
            -----
            Uses the Bayes Information Criterion (BIC) to
            select the number of components, looking for a
            good fit whilst penalising models with more
            parameters.
"""
if self.MCMC_run:
fit_points = np.array([self.dist_mod_chain,
self.logA_chain, self.RV_chain]).T
# clean out any NaNs, infs, etc
fit_points = fit_points[np.all(np.isfinite(fit_points), axis=1)]
best_bic = +np.infty
for n_components in range(1, max_components+1):
gmm = sk_m.GaussianMixture(n_components=n_components,
covariance_type='full',
reg_covar=0.0001)
gmm.fit(fit_points)
if gmm.bic(fit_points) < best_bic-10:
best_bic = gmm.bic(fit_points)
self.best_gmm = gmm
if verbose:
print(n_components, best_bic, np.sort(gmm.weights_),
"*")
else:
if verbose:
print(n_components, gmm.bic(fit_points),
np.sort(gmm.weights_))
def gmm_sample(self, filename=None, num_samples=None):
""" gmm_sample(filename=None, num_samples=None)
Sample from the Gaussian mixture model that has
been fit to the data.
Parameters
----------
filename : string, optional
A file to which the samples will be written
num_samples : int, optional
                The number of samples that will be drawn. If
None, the number of samples matches the length
of the MCMC chain.
"""
if self.best_gmm:
if num_samples is None:
num_samples = self.prob_chain.size
components = np.random.choice(self.best_gmm.weights_.size,
p=self.best_gmm.weights_,
size=num_samples)
covar_roots = []
            for covar in self.best_gmm.covariances_:
covar_roots.append(np.linalg.cholesky(covar))
self.gmm_sample = self.best_gmm.means_[components]
for it in range(components.size):
                # one draw per fitted dimension: (dist_mod, logA, RV)
                self.gmm_sample[it, :] += np.dot(
                    covar_roots[components[it]],
                    np.random.normal(size=(3, 1))).flatten()
if filename:
np.savetxt(filename, self.gmm_sample)
# ==============================================================
    # Auxiliary functions
def plot_MCMCsample(self):
""" plot_MCMCsample()
            Plot the MCMC sample in the (distance modulus, ln A) plane
on the screen.
"""
# Raise an error if matplotlib not available
if not mpl_present:
raise ImportError
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.scatter(self.dist_mod_chain, self.logA_chain, marker='.')
plt.show()
def chain_dump(self, filename):
""" chain_dum(filename)
Dump the MCMC chain to a text file
Parameters
----------
filename : string
The file to which the chain will be written.
"""
X = [self.itnum_chain, self.Teff_chain, self.logg_chain,
self.feh_chain, self.dist_mod_chain, self.logA_chain,
self.RV_chain, self.prob_chain, self.prior_chain,
self.Jac_chain, self.accept_chain]
header_txt = ("N\tTeff\tlogg\tfeh\tdist_mod\tlogA\tRV\tlike\t"
"prior\tJac\taccept")
for band in self.photom_chain:
X.append(self.photom_chain[band])
header_txt += "\t{}".format(band)
X = np.array(X).T
header_txt += "\n"
np.savetxt(filename, X, header=header_txt)
def plot_MCMCsample_gaussians(self):
"""plot_MCMCsample_gaussians()
Plot MCMC sample overlaid with gaussian fit
in (distance modulus, ln extinction) space
to the screen.
"""
# Raise an error if matplotlib not available
if not mpl_present:
raise ImportError
        # the GMM was fit in three dimensions (dist_mod, logA, RV), so predict
        # with all three and plot the (dist_mod, logA) projection below
        fit_points = np.array([self.dist_mod_chain,
                               self.logA_chain, self.RV_chain]).T
        Y_ = self.best_gmm.predict(fit_points)
fig = plt.figure()
ax1 = fig.add_subplot(111)
for it in range(self.best_gmm.weights_.size):
ax1.scatter(fit_points[Y_ == it, 0], fit_points[Y_ == it, 1],
marker='.', color=self.colors[it])
# Plot an ellipse to show the Gaussian component
            v, w = linalg.eigh(self.best_gmm.covariances_[it][:2, :2])
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
            ell = mpl.patches.Ellipse(self.best_gmm.means_[it][:2], v[0],
v[1], 180 + angle,
ec=self.colors[it], fc='none',
lw=3)
ell.set_clip_box(ax1.bbox)
ell.set_alpha(.5)
ax1.add_artist(ell)
plt.show()
def compare_MCMC_hist(self):
""" compare_MCMC_hist()
Produce a plot that compares the GMM
approximation to the estimated posterior,
showing the contribution of each component.
"""
# Raise an error if matplotlib not available
if not mpl_present:
raise ImportError
fig = plt.figure()
ax1 = fig.add_subplot(111)
bins = np.arange(8., 17.5, 0.25)
ax1.hist(self.dist_mod_chain, bins, histtype='step', ec='k')
x = np.arange(np.min(self.dist_mod_chain),
np.max(self.dist_mod_chain), 0.1)
y = np.zeros(x.size)
for it in range(self.best_gmm.weights_.size):
            # marginal variance of the distance-modulus dimension
            var0 = self.best_gmm.covariances_[it][0, 0]
            y += (1/np.sqrt(2*np.pi*var0)
                  * np.exp(-np.power(x-self.best_gmm.means_[it][0], 2)
                           / (2*var0))
                  * self.best_gmm.weights_[it])
            y_it = (1/np.sqrt(2*np.pi*var0)
                    * np.exp(-np.power(x-self.best_gmm.means_[it][0], 2)
                             / (2*var0))
                    * self.dist_mod_chain.size*.25
                    * self.best_gmm.weights_[it])
ax1.plot(x, y_it, color=self.colors[it])
y *= self.dist_mod_chain.size*.25
ax1.plot(x, y, 'k--', linewidth=1.5)
plt.show()
class posterior_cluster:
""" A class to store clusters in posterior space
"""
def __init__(self, data, probs):
""" __init__(data, probs)
Initialise a cluster in posterior space.
Parameters
----------
data : ndarray(float)
The coordinates of the data points associated
with the cluster
probs : ndarray(float)
The probabilities of each of the data points
"""
self.data = data
self.probs = probs
self.set_weight()
def __len__(self):
""" __len__()
Gives the number of points in the cluster
Returns
-------
The number of points in the cluster
"""
return self.data.shape[0]
def set_weight(self, weight=None):
""" set_weight(weight=None)
Sets the probability weight of the cluster. If no
weight is provided, the weight is set to the mean
of the probabilities of each point in the cluster
multiplied by the standard deviation of the cluster
member positions (with a floor).
Parameters
----------
weight : float
                The probability weight of the cluster
"""
if weight:
self.weight = weight
else:
self.weight = (np.mean(np.exp(self.probs))
* max(np.std(self.data[:, 1]), 0.01)
* max(np.std(self.data[:, 2]), 0.01))
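# (added sketch) hedged end-to-end usage example; the band names, magnitudes
# and isochrone file below are placeholders, not values shipped with marg_iso.
if __name__ == '__main__':
    example_mags = {'r': 16.2, 'i': 15.8}
    example_d_mags = {'r': 0.02, 'i': 0.02}
    post = star_posterior(45.0, 0.5, example_mags, example_d_mags,
                          isochrone_file='isochrones.h5',
                          init_bands=['r', 'i'])
    post.emcee_run(iterations=5000, burn_in=1000, N_walkers=50)
    post.gmm_fit(max_components=5, verbose=True)
    post.gmm_sample(filename='star_gmm_samples.txt')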
| bsd-3-clause |
SpaceKatt/CSPLN | apps/scaffolding/mac/web2py/web2py.app/Contents/Resources/lib/python2.7/matplotlib/fontconfig_pattern.py | 72 | 6429 | """
A module for parsing and generating fontconfig patterns.
See the `fontconfig pattern specification
<http://www.fontconfig.org/fontconfig-user.html>`_ for more
information.
"""
# Author : Michael Droettboom <[email protected]>
# License : matplotlib license (PSF compatible)
# This class is defined here because it must be available in:
# - The old-style config framework (:file:`rcsetup.py`)
# - The traits-based config framework (:file:`mpltraits.py`)
# - The font manager (:file:`font_manager.py`)
# It probably logically belongs in :file:`font_manager.py`, but
# placing it in any of these places would have created cyclical
# dependency problems, or an undesired dependency on traits even
# when the traits-based config framework is not used.
import re
from matplotlib.pyparsing import Literal, ZeroOrMore, \
Optional, Regex, StringEnd, ParseException, Suppress
family_punc = r'\\\-:,'
family_unescape = re.compile(r'\\([%s])' % family_punc).sub
family_escape = re.compile(r'([%s])' % family_punc).sub
value_punc = r'\\=_:,'
value_unescape = re.compile(r'\\([%s])' % value_punc).sub
value_escape = re.compile(r'([%s])' % value_punc).sub
class FontconfigPatternParser:
"""A simple pyparsing-based parser for fontconfig-style patterns.
See the `fontconfig pattern specification
<http://www.fontconfig.org/fontconfig-user.html>`_ for more
information.
"""
_constants = {
'thin' : ('weight', 'light'),
'extralight' : ('weight', 'light'),
'ultralight' : ('weight', 'light'),
'light' : ('weight', 'light'),
'book' : ('weight', 'book'),
'regular' : ('weight', 'regular'),
'normal' : ('weight', 'normal'),
'medium' : ('weight', 'medium'),
'demibold' : ('weight', 'demibold'),
'semibold' : ('weight', 'semibold'),
'bold' : ('weight', 'bold'),
'extrabold' : ('weight', 'extra bold'),
'black' : ('weight', 'black'),
'heavy' : ('weight', 'heavy'),
'roman' : ('slant', 'normal'),
'italic' : ('slant', 'italic'),
'oblique' : ('slant', 'oblique'),
'ultracondensed' : ('width', 'ultra-condensed'),
'extracondensed' : ('width', 'extra-condensed'),
'condensed' : ('width', 'condensed'),
'semicondensed' : ('width', 'semi-condensed'),
'expanded' : ('width', 'expanded'),
'extraexpanded' : ('width', 'extra-expanded'),
'ultraexpanded' : ('width', 'ultra-expanded')
}
def __init__(self):
family = Regex(r'([^%s]|(\\[%s]))*' %
(family_punc, family_punc)) \
.setParseAction(self._family)
size = Regex(r"([0-9]+\.?[0-9]*|\.[0-9]+)") \
.setParseAction(self._size)
name = Regex(r'[a-z]+') \
.setParseAction(self._name)
value = Regex(r'([^%s]|(\\[%s]))*' %
(value_punc, value_punc)) \
.setParseAction(self._value)
families =(family
+ ZeroOrMore(
Literal(',')
+ family)
).setParseAction(self._families)
point_sizes =(size
+ ZeroOrMore(
Literal(',')
+ size)
).setParseAction(self._point_sizes)
property =( (name
+ Suppress(Literal('='))
+ value
+ ZeroOrMore(
Suppress(Literal(','))
+ value)
)
| name
).setParseAction(self._property)
pattern =(Optional(
families)
+ Optional(
Literal('-')
+ point_sizes)
+ ZeroOrMore(
Literal(':')
+ property)
+ StringEnd()
)
self._parser = pattern
self.ParseException = ParseException
def parse(self, pattern):
"""
Parse the given fontconfig *pattern* and return a dictionary
of key/value pairs useful for initializing a
:class:`font_manager.FontProperties` object.
"""
props = self._properties = {}
try:
self._parser.parseString(pattern)
except self.ParseException, e:
raise ValueError("Could not parse font string: '%s'\n%s" % (pattern, e))
self._properties = None
return props
def _family(self, s, loc, tokens):
return [family_unescape(r'\1', str(tokens[0]))]
def _size(self, s, loc, tokens):
return [float(tokens[0])]
def _name(self, s, loc, tokens):
return [str(tokens[0])]
def _value(self, s, loc, tokens):
return [value_unescape(r'\1', str(tokens[0]))]
def _families(self, s, loc, tokens):
self._properties['family'] = [str(x) for x in tokens]
return []
def _point_sizes(self, s, loc, tokens):
self._properties['size'] = [str(x) for x in tokens]
return []
def _property(self, s, loc, tokens):
if len(tokens) == 1:
if tokens[0] in self._constants:
key, val = self._constants[tokens[0]]
self._properties.setdefault(key, []).append(val)
else:
key = tokens[0]
val = tokens[1:]
self._properties.setdefault(key, []).extend(val)
return []
parse_fontconfig_pattern = FontconfigPatternParser().parse
def generate_fontconfig_pattern(d):
"""
Given a dictionary of key/value pairs, generates a fontconfig
pattern string.
"""
props = []
families = ''
size = ''
for key in 'family style variant weight stretch file size'.split():
val = getattr(d, 'get_' + key)()
if val is not None and val != []:
if type(val) == list:
val = [value_escape(r'\\\1', str(x)) for x in val if x is not None]
if val != []:
val = ','.join(val)
props.append(":%s=%s" % (key, val))
return ''.join(props)
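# (added sketch) usage example; the pattern string below is illustrative.
if __name__ == '__main__':
    props = parse_fontconfig_pattern('DejaVu Sans-12:weight=bold:slant=italic')
    # expected, roughly: {'family': ['DejaVu Sans'], 'size': ['12'],
    # 'weight': ['bold'], 'slant': ['italic']}
    print props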
| gpl-3.0 |